author     Amir Ayupov <aaupov@fb.com>  2024-05-22 11:59:37 +0800
committer  Wang Pengcheng <wangpengcheng.pp@bytedance.com>  2024-05-22 11:59:37 +0800
commit     66c619ac3ec6f644d051914fe402c84177ab9437 (patch)
tree       9140fdac11dbe445151c892dc2c450bfcd6c7c86
parent     591803e81f7ad7cd07ca129187504a867927a374 (diff)
parent     97025bd9d5b32f984f07d6ae20a3ce6ddb3fbe2a (diff)
Created using spr 1.3.6-beta.1 [skip ci]
-rwxr-xr-x.ci/generate-buildkite-pipeline-premerge2
-rw-r--r--bolt/include/bolt/Core/BinaryContext.h3
-rw-r--r--bolt/include/bolt/Passes/BinaryPasses.h6
-rw-r--r--bolt/include/bolt/Passes/StokeInfo.h4
-rw-r--r--bolt/include/bolt/Profile/DataAggregator.h5
-rw-r--r--bolt/lib/Core/BinaryContext.cpp4
-rw-r--r--bolt/lib/Core/BinaryFunction.cpp3
-rw-r--r--bolt/lib/Passes/BinaryPasses.cpp12
-rw-r--r--bolt/lib/Profile/DataAggregator.cpp18
-rw-r--r--bolt/lib/Profile/YAMLProfileWriter.cpp7
-rw-r--r--bolt/lib/Rewrite/RewriteInstance.cpp63
-rw-r--r--bolt/runtime/instr.cpp4
-rw-r--r--bolt/test/X86/bolt-address-translation-yaml.test5
-rw-r--r--bolt/test/X86/ignored-interprocedural-reference.s49
-rw-r--r--bolt/test/X86/register-fragments-bolt-symbols.s8
-rwxr-xr-xbolt/test/link_fdata.py3
-rwxr-xr-xbolt/test/runtime/X86/hot-end-symbol.s3
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp4
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp5
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp8
-rw-r--r--clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp3
-rw-r--r--clang-tools-extra/clangd/Hover.cpp8
-rw-r--r--clang-tools-extra/clangd/test/infinite-instantiation.test5
-rw-r--r--clang-tools-extra/docs/ReleaseNotes.rst3
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init-no-crash.cpp8
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init.cpp6
-rw-r--r--clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-outofline.cpp30
-rw-r--r--clang/cmake/caches/HLSL.cmake2
-rw-r--r--clang/docs/ReleaseNotes.rst7
-rw-r--r--clang/docs/tools/clang-formatted-files.txt3
-rw-r--r--clang/include/clang/AST/ASTContext.h2
-rw-r--r--clang/include/clang/AST/ASTNodeTraverser.h2
-rw-r--r--clang/include/clang/AST/DeclTemplate.h17
-rw-r--r--clang/include/clang/AST/ExprCXX.h19
-rw-r--r--clang/include/clang/AST/OpenACCClause.h29
-rw-r--r--clang/include/clang/AST/RecursiveASTVisitor.h2
-rw-r--r--clang/include/clang/AST/Type.h5
-rw-r--r--clang/include/clang/Analysis/FlowSensitive/CNFFormula.h179
-rw-r--r--clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h7
-rw-r--r--clang/include/clang/Basic/DiagnosticDriverKinds.td3
-rw-r--r--clang/include/clang/Basic/DiagnosticInstallAPIKinds.td2
-rw-r--r--clang/include/clang/Basic/DiagnosticSemaKinds.td32
-rw-r--r--clang/include/clang/Basic/OpenACCClauses.def1
-rw-r--r--clang/include/clang/Basic/OpenACCKinds.h36
-rw-r--r--clang/include/clang/Format/Format.h13
-rw-r--r--clang/include/clang/Parse/Parser.h6
-rw-r--r--clang/include/clang/Sema/Sema.h86
-rw-r--r--clang/include/clang/Sema/SemaOpenACC.h29
-rw-r--r--clang/include/clang/Sema/SemaPseudoObject.h40
-rw-r--r--clang/lib/ARCMigrate/ARCMT.cpp3
-rw-r--r--clang/lib/ARCMigrate/ObjCMT.cpp3
-rw-r--r--clang/lib/AST/ASTContext.cpp35
-rw-r--r--clang/lib/AST/ASTImporter.cpp6
-rw-r--r--clang/lib/AST/DeclPrinter.cpp3
-rw-r--r--clang/lib/AST/DeclTemplate.cpp17
-rw-r--r--clang/lib/AST/ExprCXX.cpp15
-rw-r--r--clang/lib/AST/Interp/ByteCodeExprGen.cpp78
-rw-r--r--clang/lib/AST/Interp/ByteCodeExprGen.h1
-rw-r--r--clang/lib/AST/Interp/Interp.h2
-rw-r--r--clang/lib/AST/Interp/Pointer.h6
-rw-r--r--clang/lib/AST/JSONNodeDumper.cpp2
-rw-r--r--clang/lib/AST/ODRDiagsEmitter.cpp12
-rw-r--r--clang/lib/AST/ODRHash.cpp2
-rw-r--r--clang/lib/AST/OpenACCClause.cpp20
-rw-r--r--clang/lib/AST/StmtProfile.cpp6
-rw-r--r--clang/lib/AST/TextNodeDumper.cpp4
-rw-r--r--clang/lib/AST/Type.cpp8
-rw-r--r--clang/lib/AST/TypePrinter.cpp4
-rw-r--r--clang/lib/Analysis/FlowSensitive/CMakeLists.txt1
-rw-r--r--clang/lib/Analysis/FlowSensitive/CNFFormula.cpp303
-rw-r--r--clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp482
-rw-r--r--clang/lib/Basic/Targets/Mips.h17
-rw-r--r--clang/lib/CodeGen/CGAtomic.cpp14
-rw-r--r--clang/lib/CodeGen/CGBlocks.cpp2
-rw-r--r--clang/lib/CodeGen/CGBuiltin.cpp10
-rw-r--r--clang/lib/CodeGen/CGCall.cpp87
-rw-r--r--clang/lib/CodeGen/CGClass.cpp18
-rw-r--r--clang/lib/CodeGen/CGDecl.cpp34
-rw-r--r--clang/lib/CodeGen/CGDeclCXX.cpp2
-rw-r--r--clang/lib/CodeGen/CGException.cpp3
-rw-r--r--clang/lib/CodeGen/CGExpr.cpp93
-rw-r--r--clang/lib/CodeGen/CGExprAgg.cpp45
-rw-r--r--clang/lib/CodeGen/CGExprCXX.cpp16
-rw-r--r--clang/lib/CodeGen/CGExprComplex.cpp10
-rw-r--r--clang/lib/CodeGen/CGExprScalar.cpp20
-rw-r--r--clang/lib/CodeGen/CGNonTrivialStruct.cpp18
-rw-r--r--clang/lib/CodeGen/CGObjC.cpp20
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntime.cpp91
-rw-r--r--clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp16
-rw-r--r--clang/lib/CodeGen/CGStmt.cpp13
-rw-r--r--clang/lib/CodeGen/CGStmtOpenMP.cpp156
-rw-r--r--clang/lib/CodeGen/CGValue.h13
-rw-r--r--clang/lib/CodeGen/CodeGenFunction.cpp4
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp21
-rw-r--r--clang/lib/CodeGen/CodeGenTypes.cpp3
-rw-r--r--clang/lib/CodeGen/Targets/AArch64.cpp13
-rw-r--r--clang/lib/CodeGen/Targets/NVPTX.cpp2
-rw-r--r--clang/lib/CodeGen/Targets/X86.cpp2
-rw-r--r--clang/lib/Driver/ToolChains/AIX.cpp8
-rw-r--r--clang/lib/ExtractAPI/DeclarationFragments.cpp8
-rw-r--r--clang/lib/Format/Format.cpp54
-rw-r--r--clang/lib/Format/FormatTokenSource.h2
-rw-r--r--clang/lib/Format/MacroExpander.cpp12
-rw-r--r--clang/lib/Format/Macros.h24
-rw-r--r--clang/lib/Format/SortJavaScriptImports.cpp18
-rw-r--r--clang/lib/Format/UnwrappedLineParser.cpp30
-rw-r--r--clang/lib/Frontend/InitPreprocessor.cpp2
-rw-r--r--clang/lib/Index/IndexDecl.cpp3
-rw-r--r--clang/lib/Parse/ParseDecl.cpp33
-rw-r--r--clang/lib/Parse/ParseDeclCXX.cpp50
-rw-r--r--clang/lib/Parse/ParseOpenACC.cpp30
-rw-r--r--clang/lib/Sema/CMakeLists.txt2
-rw-r--r--clang/lib/Sema/HLSLExternalSemaSource.cpp48
-rw-r--r--clang/lib/Sema/Sema.cpp2
-rw-r--r--clang/lib/Sema/SemaCXXScopeSpec.cpp8
-rw-r--r--clang/lib/Sema/SemaDecl.cpp112
-rw-r--r--clang/lib/Sema/SemaDeclCXX.cpp262
-rw-r--r--clang/lib/Sema/SemaExpr.cpp122
-rw-r--r--clang/lib/Sema/SemaLookup.cpp9
-rw-r--r--clang/lib/Sema/SemaOpenACC.cpp154
-rw-r--r--clang/lib/Sema/SemaPseudoObject.cpp75
-rw-r--r--clang/lib/Sema/SemaRISCV.cpp (renamed from clang/lib/Sema/SemaRISCVVectorLookup.cpp)0
-rw-r--r--clang/lib/Sema/SemaTemplate.cpp302
-rw-r--r--clang/lib/Sema/SemaTemplateDeduction.cpp10
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiate.cpp11
-rw-r--r--clang/lib/Sema/SemaTemplateInstantiateDecl.cpp9
-rw-r--r--clang/lib/Sema/SemaTemplateVariadic.cpp2
-rw-r--r--clang/lib/Sema/TreeTransform.h50
-rw-r--r--clang/lib/Serialization/ASTReader.cpp8
-rw-r--r--clang/lib/Serialization/ASTReaderDecl.cpp3
-rw-r--r--clang/lib/Serialization/ASTReaderStmt.cpp1
-rw-r--r--clang/lib/Serialization/ASTWriter.cpp8
-rw-r--r--clang/lib/Serialization/ASTWriterDecl.cpp2
-rw-r--r--clang/lib/Serialization/ASTWriterStmt.cpp2
-rw-r--r--clang/test/AST/Interp/arrays.cpp10
-rw-r--r--clang/test/AST/Interp/cxx11.cpp16
-rw-r--r--clang/test/AST/Interp/functions.cpp6
-rw-r--r--clang/test/AST/Interp/records.cpp10
-rw-r--r--clang/test/AST/Interp/sycl.cpp9
-rw-r--r--clang/test/AST/ast-dump-ctad-alias.cpp20
-rw-r--r--clang/test/AST/ast-print-openacc-compute-construct.cpp28
-rw-r--r--clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp10
-rw-r--r--clang/test/CXX/class.derived/class.derived.general/p2.cpp116
-rw-r--r--clang/test/CXX/class/class.mfct/class.mfct.non-static/p3.cpp91
-rw-r--r--clang/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p1.cpp24
-rw-r--r--clang/test/CXX/temp/temp.spec/temp.expl.spec/p12.cpp70
-rw-r--r--clang/test/CodeGen/aarch64-byval-temp.c45
-rw-r--r--clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c2
-rw-r--r--clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp2
-rw-r--r--clang/test/CodeGen/aarch64-varargs.c30
-rw-r--r--clang/test/CodeGen/darwin-target-variant.c2
-rw-r--r--clang/test/CodeGen/fat-lto-objects.c2
-rw-r--r--clang/test/CodeGen/functions.c12
-rw-r--r--clang/test/CodeGen/nofpclass.c22
-rw-r--r--clang/test/CodeGenCXX/wasm-eh.cpp4
-rw-r--r--clang/test/CodeGenCXX/windows-implicit-dllexport-template-specialization.cpp13
-rw-r--r--clang/test/CodeGenCXX/windows-itanium-dllexport.cpp18
-rw-r--r--clang/test/CodeGenOpenCLCXX/array-type-infinite-loop.clcpp25
-rw-r--r--clang/test/CoverageMapping/mcdc-system-headers.cpp4
-rw-r--r--clang/test/Driver/tocdata-cc1.c17
-rw-r--r--clang/test/InstallAPI/alias_list.test2
-rw-r--r--clang/test/InstallAPI/exclusive-passes-2.test9
-rw-r--r--clang/test/InstallAPI/exclusive-passes-3.test86
-rw-r--r--clang/test/InstallAPI/exclusive-passes.test15
-rw-r--r--clang/test/InstallAPI/invalid-exclusive-passes.test33
-rw-r--r--clang/test/OpenMP/threadprivate_codegen.cpp3686
-rw-r--r--clang/test/PCH/pack_indexing.cpp4
-rw-r--r--clang/test/Parser/altivec.c24
-rw-r--r--clang/test/Parser/cxx-altivec.cpp24
-rw-r--r--clang/test/Parser/lax-conv.cpp52
-rw-r--r--clang/test/ParserOpenACC/parse-clauses.c26
-rw-r--r--clang/test/Preprocessor/riscv-target-features.c36
-rw-r--r--clang/test/Sema/builtins.c8
-rw-r--r--clang/test/SemaCXX/cxx20-ctad-type-alias.cpp5
-rw-r--r--clang/test/SemaCXX/cxx2c-pack-indexing.cpp8
-rw-r--r--clang/test/SemaCXX/warn-thread-safety-analysis.cpp10
-rw-r--r--clang/test/SemaOpenACC/compute-construct-attach-clause.c2
-rw-r--r--clang/test/SemaOpenACC/compute-construct-clause-ast.cpp248
-rw-r--r--clang/test/SemaOpenACC/compute-construct-copy-clause.c8
-rw-r--r--clang/test/SemaOpenACC/compute-construct-copy-clause.cpp16
-rw-r--r--clang/test/SemaOpenACC/compute-construct-copyin-clause.c10
-rw-r--r--clang/test/SemaOpenACC/compute-construct-copyin-clause.cpp16
-rw-r--r--clang/test/SemaOpenACC/compute-construct-copyout-clause.c10
-rw-r--r--clang/test/SemaOpenACC/compute-construct-copyout-clause.cpp16
-rw-r--r--clang/test/SemaOpenACC/compute-construct-create-clause.c10
-rw-r--r--clang/test/SemaOpenACC/compute-construct-create-clause.cpp16
-rw-r--r--clang/test/SemaOpenACC/compute-construct-device_type-clause.c2
-rw-r--r--clang/test/SemaOpenACC/compute-construct-deviceptr-clause.c2
-rw-r--r--clang/test/SemaOpenACC/compute-construct-firstprivate-clause.c8
-rw-r--r--clang/test/SemaOpenACC/compute-construct-firstprivate-clause.cpp16
-rw-r--r--clang/test/SemaOpenACC/compute-construct-no_create-clause.c8
-rw-r--r--clang/test/SemaOpenACC/compute-construct-no_create-clause.cpp16
-rw-r--r--clang/test/SemaOpenACC/compute-construct-present-clause.c8
-rw-r--r--clang/test/SemaOpenACC/compute-construct-present-clause.cpp16
-rw-r--r--clang/test/SemaOpenACC/compute-construct-private-clause.c10
-rw-r--r--clang/test/SemaOpenACC/compute-construct-private-clause.cpp16
-rw-r--r--clang/test/SemaOpenACC/compute-construct-reduction-clause.c107
-rw-r--r--clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp175
-rw-r--r--clang/test/SemaTemplate/cwg2398.cpp15
-rw-r--r--clang/test/SemaTemplate/dependent-names.cpp14
-rw-r--r--clang/test/SemaTemplate/destructor-template.cpp14
-rw-r--r--clang/test/SemaTemplate/ms-function-specialization-class-scope.cpp52
-rw-r--r--clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp12
-rw-r--r--clang/test/SemaTemplate/typo-dependent-name.cpp7
-rw-r--r--clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp2
-rw-r--r--clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp2
-rw-r--r--clang/tools/clang-format/ClangFormat.cpp22
-rw-r--r--clang/tools/clang-installapi/InstallAPIOpts.td3
-rw-r--r--clang/tools/clang-installapi/Options.cpp74
-rw-r--r--clang/tools/clang-installapi/Options.h2
-rw-r--r--clang/tools/libclang/CIndex.cpp11
-rw-r--r--clang/unittests/AST/ASTImporterTest.cpp2
-rw-r--r--clang/unittests/AST/Interp/Descriptor.cpp24
-rw-r--r--clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp6
-rw-r--r--clang/unittests/Format/CleanupTest.cpp14
-rw-r--r--clang/unittests/Format/DefinitionBlockSeparatorTest.cpp29
-rw-r--r--clang/unittests/Format/FormatTest.cpp10
-rw-r--r--clang/unittests/Format/FormatTestBase.h27
-rw-r--r--clang/unittests/Format/FormatTestCSharp.cpp6
-rw-r--r--clang/unittests/Format/FormatTestJS.cpp12
-rw-r--r--clang/unittests/Format/FormatTestJson.cpp13
-rw-r--r--clang/unittests/Format/FormatTestProto.cpp10
-rw-r--r--clang/unittests/Format/FormatTestRawStrings.cpp5
-rw-r--r--clang/unittests/Format/FormatTestSelective.cpp4
-rw-r--r--clang/unittests/Format/FormatTestTableGen.cpp14
-rw-r--r--clang/unittests/Format/FormatTestUtils.h2
-rw-r--r--clang/unittests/Format/FormatTestVerilog.cpp2
-rw-r--r--clang/unittests/Format/FormatTokenSourceTest.cpp5
-rw-r--r--clang/unittests/Format/MacroCallReconstructorTest.cpp44
-rw-r--r--clang/unittests/Format/MacroExpanderTest.cpp19
-rw-r--r--clang/unittests/Format/MatchFilePathTest.cpp2
-rw-r--r--clang/unittests/Format/NamespaceEndCommentsFixerTest.cpp11
-rw-r--r--clang/unittests/Format/ObjCPropertyAttributeOrderFixerTest.cpp2
-rw-r--r--clang/unittests/Format/QualifierFixerTest.cpp2
-rw-r--r--clang/unittests/Format/SortImportsTestJS.cpp9
-rw-r--r--clang/unittests/Format/SortImportsTestJava.cpp2
-rw-r--r--clang/unittests/Format/SortIncludesTest.cpp6
-rw-r--r--clang/unittests/Format/TestLexer.h14
-rw-r--r--clang/unittests/Format/TokenAnnotatorTest.cpp81
-rw-r--r--clang/unittests/Format/UsingDeclarationsSorterTest.cpp8
-rw-r--r--clang/utils/TableGen/ClangDiagnosticsEmitter.cpp2
-rwxr-xr-xclang/www/cxx_dr_status.html38
-rw-r--r--compiler-rt/lib/dfsan/dfsan_allocator.cpp2
-rw-r--r--compiler-rt/lib/dfsan/dfsan_custom.cpp26
-rw-r--r--compiler-rt/lib/lsan/lsan_allocator.cpp2
-rw-r--r--compiler-rt/lib/msan/msan_allocator.cpp2
-rw-r--r--compiler-rt/test/dfsan/custom.cpp67
-rw-r--r--flang/include/flang/Lower/AbstractConverter.h3
-rw-r--r--flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td10
-rw-r--r--flang/include/flang/Optimizer/HLFIR/Passes.h1
-rw-r--r--flang/include/flang/Optimizer/HLFIR/Passes.td3
-rw-r--r--flang/include/flang/Semantics/openmp-directive-sets.h2
-rw-r--r--flang/include/flang/Tools/CLOptions.inc2
-rw-r--r--flang/lib/Lower/Bridge.cpp141
-rw-r--r--flang/lib/Lower/OpenMP/DataSharingProcessor.cpp110
-rw-r--r--flang/lib/Lower/OpenMP/DataSharingProcessor.h9
-rw-r--r--flang/lib/Lower/OpenMP/Decomposer.cpp5
-rw-r--r--flang/lib/Lower/OpenMP/Decomposer.h3
-rw-r--r--flang/lib/Lower/OpenMP/OpenMP.cpp30
-rw-r--r--flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt1
-rw-r--r--flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp18
-rw-r--r--flang/lib/Optimizer/HLFIR/Transforms/InlineElementals.cpp10
-rw-r--r--flang/lib/Parser/openmp-parsers.cpp11
-rw-r--r--flang/lib/Parser/unparse.cpp18
-rw-r--r--flang/lib/Semantics/resolve-directives.cpp8
-rw-r--r--flang/runtime/CMakeLists.txt1
-rw-r--r--flang/runtime/edit-output.cpp7
-rw-r--r--flang/test/Driver/mlir-debug-pass-pipeline.f907
-rw-r--r--flang/test/Driver/mlir-pass-pipeline.f9019
-rw-r--r--flang/test/Fir/basic-program.fir3
-rw-r--r--flang/test/Lower/CUDA/cuda-data-transfer.cuf28
-rw-r--r--flang/test/Lower/OpenMP/Todo/masked-directive.f9013
-rw-r--r--flang/test/Lower/OpenMP/Todo/omp-default-clause-inner-loop.f902
-rw-r--r--flang/test/Lower/OpenMP/copyin.f9016
-rw-r--r--flang/test/Lower/OpenMP/critical.f9023
-rw-r--r--flang/test/Lower/OpenMP/default-clause.f9027
-rw-r--r--flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f9013
-rw-r--r--flang/test/Lower/OpenMP/hlfir-wsloop.f902
-rw-r--r--flang/test/Lower/OpenMP/lastprivate-iv.f9033
-rw-r--r--flang/test/Lower/OpenMP/parallel-lastprivate-clause-scalar.f9023
-rw-r--r--flang/test/Lower/OpenMP/parallel-private-clause-fixes.f904
-rw-r--r--flang/test/Lower/OpenMP/parallel-private-clause.f9017
-rw-r--r--flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f902
-rw-r--r--flang/test/Lower/OpenMP/parallel-reduction3.f902
-rw-r--r--flang/test/Lower/OpenMP/parallel-wsloop-firstpriv.f9015
-rw-r--r--flang/test/Lower/OpenMP/parallel-wsloop.f9032
-rw-r--r--flang/test/Lower/OpenMP/stop-stmt-in-region.f908
-rw-r--r--flang/test/Lower/OpenMP/target.f902
-rw-r--r--flang/test/Lower/OpenMP/unstructured.f9014
-rw-r--r--flang/test/Lower/OpenMP/wsloop-collapse.f9040
-rw-r--r--flang/test/Lower/OpenMP/wsloop-monotonic.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-nonmonotonic.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-add-byref.f9014
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir-byref.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-add.f9014
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-array.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-array2.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-iand-byref.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-iand.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-ieor-byref.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-ieor.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-ior-byref.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-ior.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-and-byref.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv-byref.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv-byref.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-or-byref.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-max-byref.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir-byref.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-max.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-min-byref.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-min.f906
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-min2.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-mul-byref.f9014
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-mul.f9014
-rw-r--r--flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f902
-rw-r--r--flang/test/Lower/OpenMP/wsloop-variable.f9020
-rw-r--r--flang/test/Lower/OpenMP/wsloop.f906
-rw-r--r--flang/test/Lower/branching-directive.f9077
-rw-r--r--flang/test/Lower/unstructured-control-flow.f9031
-rw-r--r--flang/test/Parser/OpenMP/masked-unparse.f9092
-rw-r--r--flang/test/Semantics/OpenMP/allocate-clause01.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocate-directive.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocate01.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocate02.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocate03.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocate04.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocate05.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocate06.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocate07.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocate08.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocate09.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocators01.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocators02.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocators03.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocators04.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocators05.f902
-rw-r--r--flang/test/Semantics/OpenMP/allocators06.f902
-rw-r--r--flang/test/Semantics/OpenMP/atomic-hint-clause.f902
-rw-r--r--flang/test/Semantics/OpenMP/atomic.f902
-rw-r--r--flang/test/Semantics/OpenMP/atomic01.f902
-rw-r--r--flang/test/Semantics/OpenMP/atomic02.f902
-rw-r--r--flang/test/Semantics/OpenMP/atomic03.f902
-rw-r--r--flang/test/Semantics/OpenMP/atomic04.f902
-rw-r--r--flang/test/Semantics/OpenMP/atomic05.f902
-rw-r--r--flang/test/Semantics/OpenMP/barrier.f902
-rw-r--r--flang/test/Semantics/OpenMP/clause-validity01.f902
-rw-r--r--flang/test/Semantics/OpenMP/combined-constructs.f902
-rw-r--r--flang/test/Semantics/OpenMP/common-block.f902
-rw-r--r--flang/test/Semantics/OpenMP/compiler-directive.f902
-rw-r--r--flang/test/Semantics/OpenMP/copyin01.f902
-rw-r--r--flang/test/Semantics/OpenMP/copyin02.f902
-rw-r--r--flang/test/Semantics/OpenMP/copyin03.f902
-rw-r--r--flang/test/Semantics/OpenMP/copyin04.f902
-rw-r--r--flang/test/Semantics/OpenMP/copyin05.f902
-rw-r--r--flang/test/Semantics/OpenMP/copying.f902
-rw-r--r--flang/test/Semantics/OpenMP/copyprivate01.f902
-rw-r--r--flang/test/Semantics/OpenMP/copyprivate02.f902
-rw-r--r--flang/test/Semantics/OpenMP/copyprivate03.f902
-rw-r--r--flang/test/Semantics/OpenMP/critical-empty.f902
-rw-r--r--flang/test/Semantics/OpenMP/critical-hint-clause.f902
-rw-r--r--flang/test/Semantics/OpenMP/do02.f9021
-rw-r--r--flang/test/Semantics/OpenMP/masked.f9013
-rw-r--r--flang/test/Semantics/OpenMP/sections01.f902
-rw-r--r--flang/test/Semantics/OpenMP/sections02.f902
-rw-r--r--flang/test/Semantics/OpenMP/sections03.f902
-rw-r--r--flang/test/Semantics/OpenMP/simd-aligned.f902
-rw-r--r--flang/test/Semantics/OpenMP/simd-nontemporal.f902
-rw-r--r--flang/test/Semantics/OpenMP/simd01.f902
-rw-r--r--flang/test/Semantics/OpenMP/simd02.f902
-rw-r--r--flang/test/Semantics/OpenMP/simd03.f902
-rw-r--r--flang/test/Semantics/OpenMP/single01.f902
-rw-r--r--flang/test/Semantics/OpenMP/single02.f902
-rw-r--r--flang/test/Semantics/OpenMP/struct.f902
-rw-r--r--flang/test/Semantics/OpenMP/symbol01.f902
-rw-r--r--flang/test/Semantics/OpenMP/symbol02.f902
-rw-r--r--flang/test/Semantics/OpenMP/symbol03.f902
-rw-r--r--flang/test/Semantics/OpenMP/symbol04.f902
-rw-r--r--flang/test/Semantics/OpenMP/symbol05.f902
-rw-r--r--flang/test/Semantics/OpenMP/symbol06.f902
-rw-r--r--flang/test/Semantics/OpenMP/symbol07.f902
-rw-r--r--flang/test/Semantics/OpenMP/symbol08.f902
-rw-r--r--flang/test/Semantics/OpenMP/symbol09.f902
-rw-r--r--flang/test/Semantics/OpenMP/sync-critical01.f902
-rw-r--r--flang/test/Semantics/OpenMP/sync-critical02.f902
-rw-r--r--flang/test/Semantics/OpenMP/taskgroup01.f902
-rw-r--r--flang/test/Semantics/OpenMP/taskloop01.f902
-rw-r--r--flang/test/Semantics/OpenMP/taskloop02.f902
-rw-r--r--flang/test/Semantics/OpenMP/taskloop03.f9025
-rw-r--r--flang/test/Semantics/OpenMP/taskwait.f902
-rw-r--r--flang/test/Semantics/OpenMP/threadprivate01.f902
-rw-r--r--flang/test/Semantics/OpenMP/threadprivate02.f902
-rw-r--r--flang/test/Semantics/OpenMP/threadprivate03.f902
-rw-r--r--flang/test/Semantics/OpenMP/threadprivate04.f902
-rw-r--r--flang/test/Semantics/OpenMP/threadprivate05.f902
-rw-r--r--flang/test/Semantics/OpenMP/threadprivate06.f902
-rw-r--r--flang/test/Semantics/OpenMP/threadprivate07.f902
-rw-r--r--flang/test/Semantics/OpenMP/use_device_addr.f902
-rw-r--r--flang/test/Semantics/OpenMP/use_device_addr1.f902
-rw-r--r--flang/test/Semantics/OpenMP/use_device_ptr.f902
-rw-r--r--flang/test/Semantics/OpenMP/use_device_ptr1.f902
-rw-r--r--flang/test/Semantics/OpenMP/workshare01.f902
-rw-r--r--flang/test/Semantics/OpenMP/workshare02.f902
-rw-r--r--flang/test/Semantics/OpenMP/workshare03.f902
-rw-r--r--flang/test/Semantics/OpenMP/workshare04.f902
-rw-r--r--flang/test/Semantics/OpenMP/workshare05.f902
-rw-r--r--libc/cmake/modules/LLVMLibCObjectRules.cmake3
-rw-r--r--libc/config/baremetal/arm/entrypoints.txt4
-rw-r--r--libc/config/baremetal/riscv/entrypoints.txt4
-rw-r--r--libc/include/llvm-libc-macros/linux/CMakeLists.txt6
-rw-r--r--libc/include/llvm-libc-macros/linux/error-number-macros.h8
-rw-r--r--libc/include/llvm-libc-macros/linux/mips/CMakeLists.txt5
-rw-r--r--libc/include/llvm-libc-macros/linux/mips/error-number-macros.h24
-rw-r--r--libc/include/llvm-libc-macros/linux/sparc/CMakeLists.txt5
-rw-r--r--libc/include/llvm-libc-macros/linux/sparc/error-number-macros.h24
-rw-r--r--libc/src/__support/threads/CMakeLists.txt9
-rw-r--r--libc/src/__support/threads/CndVar.h52
-rw-r--r--libc/src/__support/threads/linux/CMakeLists.txt13
-rw-r--r--libc/src/__support/threads/linux/CndVar.cpp103
-rw-r--r--libc/src/setjmp/x86_64/CMakeLists.txt5
-rw-r--r--libc/src/threads/linux/CMakeLists.txt11
-rw-r--r--libc/src/threads/linux/CndVar.h148
-rw-r--r--libc/src/threads/linux/cnd_broadcast.cpp11
-rw-r--r--libc/src/threads/linux/cnd_destroy.cpp7
-rw-r--r--libc/src/threads/linux/cnd_init.cpp9
-rw-r--r--libc/src/threads/linux/cnd_signal.cpp10
-rw-r--r--libc/src/threads/linux/cnd_wait.cpp11
-rw-r--r--libc/src/time/gpu/time_utils.cpp3
-rw-r--r--libc/src/time/gpu/time_utils.h5
-rw-r--r--libc/startup/baremetal/CMakeLists.txt11
-rw-r--r--libc/startup/baremetal/fini.cpp27
-rw-r--r--libc/startup/baremetal/init.cpp32
-rw-r--r--libc/test/integration/scudo/CMakeLists.txt4
-rw-r--r--libcxx/docs/ReleaseNotes/19.rst1
-rw-r--r--libcxx/docs/Status/Cxx20Papers.csv4
-rw-r--r--libcxx/docs/Status/Cxx23Issues.csv2
-rw-r--r--libcxx/include/CMakeLists.txt2
-rw-r--r--libcxx/include/__atomic/atomic_ref.h360
-rw-r--r--libcxx/include/__atomic/atomic_sync.h1
-rw-r--r--libcxx/include/__atomic/check_memory_order.h4
-rw-r--r--libcxx/include/__atomic/cxx_atomic_impl.h27
-rw-r--r--libcxx/include/__atomic/to_gcc_order.h54
-rw-r--r--libcxx/include/__type_traits/has_unique_object_representation.h6
-rw-r--r--libcxx/include/atomic1
-rw-r--r--libcxx/include/libcxx.imp869
-rw-r--r--libcxx/include/locale53
-rw-r--r--libcxx/include/module.modulemap10
-rw-r--r--libcxx/modules/std/atomic.inc2
-rw-r--r--libcxx/src/chrono.cpp4
-rw-r--r--libcxx/src/locale.cpp4
-rw-r--r--libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp58
-rw-r--r--libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp58
-rw-r--r--libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp40
-rw-r--r--libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp55
-rw-r--r--libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp63
-rw-r--r--libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp55
-rw-r--r--libcxx/test/std/atomics/atomics.ref/assign.pass.cpp50
-rw-r--r--libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp60
-rw-r--r--libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp56
-rw-r--r--libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp56
-rw-r--r--libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp221
-rw-r--r--libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp226
-rw-r--r--libcxx/test/std/atomics/atomics.ref/convert.pass.cpp45
-rw-r--r--libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp37
-rw-r--r--libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp33
-rw-r--r--libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp45
-rw-r--r--libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp113
-rw-r--r--libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp69
-rw-r--r--libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp68
-rw-r--r--libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp113
-rw-r--r--libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp68
-rw-r--r--libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp97
-rw-r--r--libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp71
-rw-r--r--libcxx/test/std/atomics/atomics.ref/load.pass.cpp62
-rw-r--r--libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp132
-rw-r--r--libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp78
-rw-r--r--libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp46
-rw-r--r--libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp79
-rw-r--r--libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp79
-rw-r--r--libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp39
-rw-r--r--libcxx/test/std/atomics/atomics.ref/requires-trivially-copyable.verify.cpp26
-rw-r--r--libcxx/test/std/atomics/atomics.ref/store.pass.cpp61
-rw-r--r--libcxx/test/std/atomics/atomics.ref/test_helper.h136
-rw-r--r--libcxx/test/std/atomics/atomics.ref/wait.pass.cpp88
-rw-r--r--libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp161
-rw-r--r--libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp161
-rw-r--r--libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp161
-rw-r--r--libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/assign.pass.cpp4
-rw-r--r--libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/copy.pass.cpp4
-rw-r--r--libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.cons/mutex.pass.cpp132
-rw-r--r--libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/lock.pass.cpp140
-rw-r--r--libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/try_lock.pass.cpp135
-rw-r--r--libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/has_unique_object_representations.pass.cpp2
-rw-r--r--libcxx/utils/libcxx/test/features.py35
-rw-r--r--libunwind/include/__libunwind_config.h4
-rw-r--r--libunwind/src/UnwindLevel1.c3
-rw-r--r--libunwind/src/UnwindRegistersRestore.S4
-rw-r--r--libunwind/src/UnwindRegistersSave.S4
-rw-r--r--libunwind/src/libunwind.cpp4
-rw-r--r--lld/COFF/DriverUtils.cpp40
-rw-r--r--lld/ELF/Config.h5
-rw-r--r--lld/ELF/Driver.cpp36
-rw-r--r--lld/test/COFF/arm64ec-exports.s121
-rw-r--r--lld/test/ELF/aarch64-feature-gcs.s134
-rw-r--r--lldb/include/lldb/API/SBCommandInterpreter.h8
-rw-r--r--lldb/include/lldb/API/SBDebugger.h13
-rw-r--r--lldb/include/lldb/Core/Debugger.h31
-rw-r--r--lldb/include/lldb/Interpreter/CommandInterpreter.h18
-rw-r--r--lldb/include/lldb/lldb-types.h2
-rw-r--r--lldb/source/API/SBCommandInterpreter.cpp16
-rw-r--r--lldb/source/API/SBDebugger.cpp20
-rw-r--r--lldb/source/Commands/CommandObjectThread.cpp4
-rw-r--r--lldb/source/Core/Debugger.cpp45
-rw-r--r--lldb/source/Interpreter/CommandInterpreter.cpp53
-rw-r--r--lldb/source/Interpreter/InterpreterProperties.td4
-rw-r--r--lldb/source/Plugins/Instruction/ARM64/EmulateInstructionARM64.cpp2
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.cpp6
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.h5
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp28
-rw-r--r--lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h2
-rw-r--r--lldb/source/Plugins/UnwindAssembly/InstEmulation/UnwindAssemblyInstEmulation.cpp4
-rw-r--r--lldb/source/Target/RegisterContextUnwind.cpp6
-rw-r--r--lldb/test/API/commands/session/save/TestSessionSave.py12
-rw-r--r--lldb/test/API/functionalities/bt-interrupt/main.c1
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/Makefile2
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/TestWithLimitDebugInfo.py40
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/base.cpp9
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/base.h22
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/derived.cpp11
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/derived.h37
-rw-r--r--lldb/test/API/lang/cpp/limit-debug-info/main.cpp6
-rw-r--r--lldb/test/API/python_api/debugger/TestDebuggerAPI.py121
-rw-r--r--lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py172
-rw-r--r--lldb/test/API/python_api/interpreter/main.c5
-rw-r--r--lldb/test/API/tools/lldb-dap/evaluate/TestDAP_evaluate.py2
-rw-r--r--lldb/test/API/tools/lldb-dap/variables/TestDAP_variables.py29
-rw-r--r--lldb/test/Shell/Unwind/Inputs/signal-in-leaf-function-aarch64.c15
-rw-r--r--lldb/test/Shell/Unwind/signal-in-leaf-function-aarch64.test30
-rw-r--r--lldb/tools/lldb-dap/JSONUtils.cpp6
-rw-r--r--lldb/unittests/SymbolFile/DWARF/DWARFDIETest.cpp24
-rw-r--r--lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp24
-rw-r--r--llvm/cmake/modules/HandleLLVMOptions.cmake2
-rw-r--r--llvm/docs/AMDGPUUsage.rst2
-rw-r--r--llvm/docs/DeveloperPolicy.rst34
-rw-r--r--llvm/docs/LangRef.rst18
-rw-r--r--llvm/docs/MemorySSA.rst17
-rw-r--r--llvm/docs/RISCVUsage.rst5
-rw-r--r--llvm/docs/ReleaseNotes.rst1
-rw-r--r--llvm/include/llvm/Analysis/CFG.h12
-rw-r--r--llvm/include/llvm/Analysis/InstSimplifyFolder.h13
-rw-r--r--llvm/include/llvm/Analysis/TargetFolder.h11
-rw-r--r--llvm/include/llvm/BinaryFormat/ELF.h4
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h4
-rw-r--r--llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h17
-rw-r--r--llvm/include/llvm/CodeGen/SelectionDAG.h5
-rw-r--r--llvm/include/llvm/IR/ConstantFolder.h11
-rw-r--r--llvm/include/llvm/IR/IRBuilder.h2
-rw-r--r--llvm/include/llvm/IR/IRBuilderFolder.h11
-rw-r--r--llvm/include/llvm/IR/ModuleSummaryIndex.h7
-rw-r--r--llvm/include/llvm/IR/NoFolder.h11
-rw-r--r--llvm/include/llvm/MCA/IncrementalSourceMgr.h2
-rw-r--r--llvm/include/llvm/MCA/InstrBuilder.h3
-rw-r--r--llvm/include/llvm/ProfileData/SampleProfReader.h10
-rw-r--r--llvm/include/llvm/Target/TargetSelectionDAG.td3
-rw-r--r--llvm/include/llvm/Transforms/IPO/FunctionImport.h15
-rw-r--r--llvm/lib/Analysis/CFG.cpp74
-rw-r--r--llvm/lib/Analysis/InstructionSimplify.cpp13
-rw-r--r--llvm/lib/Analysis/LoopAccessAnalysis.cpp50
-rw-r--r--llvm/lib/Analysis/TargetLibraryInfo.cpp3
-rw-r--r--llvm/lib/Analysis/ValueTracking.cpp34
-rw-r--r--llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp4
-rw-r--r--llvm/lib/CodeGen/MachineScheduler.cpp6
-rw-r--r--llvm/lib/CodeGen/RegisterPressure.cpp6
-rw-r--r--llvm/lib/CodeGen/ScheduleDAG.cpp4
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp11
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp9
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp17
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp19
-rw-r--r--llvm/lib/CodeGen/TargetLoweringBase.cpp28
-rw-r--r--llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp2
-rw-r--r--llvm/lib/IR/IRBuilder.cpp5
-rw-r--r--llvm/lib/IR/MDBuilder.cpp14
-rw-r--r--llvm/lib/IR/Module.cpp2
-rw-r--r--llvm/lib/LTO/LTO.cpp32
-rw-r--r--llvm/lib/LTO/LTOBackend.cpp9
-rw-r--r--llvm/lib/ProfileData/SampleProfReader.cpp8
-rw-r--r--llvm/lib/Support/LockFileManager.cpp2
-rw-r--r--llvm/lib/Support/raw_socket_stream.cpp23
-rw-r--r--llvm/lib/Target/AArch64/AArch64PointerAuth.cpp10
-rw-r--r--llvm/lib/Target/AArch64/AArch64PointerAuth.h12
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUInstructions.td22
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp91
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp65
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp11
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.h2
-rw-r--r--llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp76
-rw-r--r--llvm/lib/Target/AMDGPU/VOP1Instructions.td40
-rw-r--r--llvm/lib/Target/BPF/BPFMIChecking.cpp9
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchSubtarget.h5
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp4
-rw-r--r--llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h1
-rw-r--r--llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp16
-rw-r--r--llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp29
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrInfo.cpp6
-rw-r--r--llvm/lib/Target/PowerPC/PPCInstrInfo.td2
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp1
-rw-r--r--llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp3
-rw-r--r--llvm/lib/Target/RISCV/RISCVFeatures.td8
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp25
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.h5
-rw-r--r--llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp138
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoV.td13
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td12
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td12
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td14
-rw-r--r--llvm/lib/Target/RISCV/RISCVSchedSiFive7.td9
-rw-r--r--llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td8
-rw-r--r--llvm/lib/Target/RISCV/RISCVScheduleV.td10
-rw-r--r--llvm/lib/Target/RISCV/RISCVTargetMachine.cpp19
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp20
-rw-r--r--llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp17
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp2
-rw-r--r--llvm/lib/Transforms/Coroutines/CoroElide.cpp6
-rw-r--r--llvm/lib/Transforms/Coroutines/CoroFrame.cpp60
-rw-r--r--llvm/lib/Transforms/IPO/FunctionImport.cpp270
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp11
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp11
-rw-r--r--llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp47
-rw-r--r--llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp8
-rw-r--r--llvm/lib/Transforms/Scalar/DivRemPairs.cpp5
-rw-r--r--llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp10
-rw-r--r--llvm/lib/Transforms/Scalar/NaryReassociate.cpp1
-rw-r--r--llvm/lib/Transforms/Scalar/SROA.cpp4
-rw-r--r--llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp19
-rw-r--r--llvm/lib/Transforms/Utils/CloneFunction.cpp7
-rw-r--r--llvm/lib/Transforms/Vectorize/LoopVectorize.cpp4
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h43
-rw-r--r--llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp4
-rw-r--r--llvm/lib/Transforms/Vectorize/VectorCombine.cpp231
-rw-r--r--llvm/test/Analysis/CostModel/AMDGPU/shufflevector.ll1621
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll12
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/invariant-dependence-before.ll114
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/loop-invariant-dep-with-backedge-taken-count.ll14
-rw-r--r--llvm/test/Analysis/LoopAccessAnalysis/non-constant-strides-backward.ll7
-rw-r--r--llvm/test/Assembler/ConstantExprFold.ll32
-rw-r--r--llvm/test/CodeGen/AArch64/exp10-libcall-names.ll39
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir16
-rw-r--r--llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll152
-rw-r--r--llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll57
-rw-r--r--llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll76
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll76
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll76
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll3872
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll3498
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll3498
-rw-r--r--llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll3872
-rw-r--r--llvm/test/CodeGen/AMDGPU/lower-ctor-dtor-constexpr-alias.ll16
-rw-r--r--llvm/test/CodeGen/AMDGPU/lower-ctor-dtor.ll14
-rw-r--r--llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll109
-rw-r--r--llvm/test/CodeGen/AMDGPU/permute_i8.ll25
-rw-r--r--llvm/test/CodeGen/AMDGPU/simplify-libcalls.ll7
-rw-r--r--llvm/test/CodeGen/ARM/exp10-libcall-names.ll39
-rw-r--r--llvm/test/CodeGen/BPF/xadd.ll2
-rw-r--r--llvm/test/CodeGen/NVPTX/param-overalign.ll2
-rw-r--r--llvm/test/CodeGen/NVPTX/st-param-imm.ll2
-rw-r--r--llvm/test/CodeGen/PowerPC/toc-data.ll67
-rw-r--r--llvm/test/CodeGen/PowerPC/vec_shuffle.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll201
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv32.mir62
-rw-r--r--llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv64.mir96
-rw-r--r--llvm/test/CodeGen/RISCV/O0-pipeline.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/O3-pipeline.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/attributes.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/pr69586.ll210
-rw-r--r--llvm/test/CodeGen/RISCV/pr90730.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/abs-vp.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll84
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll234
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll258
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll39
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll92
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/compressstore.ll53
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll129
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll182
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll35
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll36
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll410
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll234
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll7
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll94
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll1150
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll523
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll1066
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll17
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll23
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll94
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll31
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll31
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll114
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll19
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll29
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll181
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll69
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll192
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll76
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll903
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll105
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll127
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll1243
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll1938
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll41
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll71
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll140
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll20
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll52
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll58
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll104
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll94
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll94
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll94
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll55
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll42
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll3
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll25
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll156
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll5
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll29
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll70
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll29
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll29
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll70
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll80
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll87
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll86
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll15
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll21
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll21
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/floor-vp.ll92
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll99
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll99
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll132
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll135
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/masked-tama.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll31
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll57
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll124
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/pr63596.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rint-vp.ll137
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/round-vp.ll143
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll143
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll143
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll342
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll18
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll40
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll1
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll68
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/stepvector.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll24
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll11
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vcpop.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll50
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll46
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll192
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vector-splice.ll58
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfirst.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll38
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll156
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll144
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll6
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll149
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll38
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll165
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll141
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll26
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfeq.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfge.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfgt.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfle.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmflt.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmfne.ll96
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsbf.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmseq.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsge.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll176
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsgt.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsif.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsle.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsleu.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmslt.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsltu.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsne.ll178
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vmsof.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll14
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll80
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpload.ll10
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll44
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vpstore.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll12
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll76
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll16
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll112
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll48
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll28
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll4
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll5
-rw-r--r--llvm/test/CodeGen/SPIRV/execution-mode-reqd_work_group_size.ll35
-rw-r--r--llvm/test/CodeGen/SPIRV/execution-mode-work_group_size_hint.ll34
-rw-r--r--llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll94
-rw-r--r--llvm/test/CodeGen/X86/coalescer-add-implicit-def-subreg-to-reg-regression.ll45
-rw-r--r--llvm/test/CodeGen/X86/exp10-libcall-names.ll40
-rw-r--r--llvm/test/CodeGen/X86/fat-lto-section.ll2
-rw-r--r--llvm/test/CodeGen/X86/issue76416.ll78
-rw-r--r--llvm/test/CodeGen/X86/misched-critical-path.ll240
-rw-r--r--llvm/test/CodeGen/X86/pr90703.ll21
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/mem-attr.ll15
-rw-r--r--llvm/test/Linker/darwin-target-variant.ll42
-rw-r--r--llvm/test/MC/MachO/darwin-target-variant-reverse.ll2
-rw-r--r--llvm/test/MC/MachO/darwin-target-variant.ll2
-rw-r--r--llvm/test/MC/RISCV/rv32zaamo-invalid.s2
-rw-r--r--llvm/test/MC/RISCV/rv32zaamo-valid.s12
-rw-r--r--llvm/test/MC/RISCV/rv32zalrsc-invalid.s2
-rw-r--r--llvm/test/MC/RISCV/rv32zalrsc-valid.s12
-rw-r--r--llvm/test/MC/RISCV/rv64zaamo-invalid.s2
-rw-r--r--llvm/test/MC/RISCV/rv64zaamo-valid.s8
-rw-r--r--llvm/test/MC/RISCV/rv64zalrsc-invalid.s2
-rw-r--r--llvm/test/MC/RISCV/rv64zalrsc-valid.s8
-rw-r--r--llvm/test/TableGen/predicate-patfags.td30
-rw-r--r--llvm/test/ThinLTO/X86/funcimport-stats.ll4
-rw-r--r--llvm/test/ThinLTO/X86/import_callee_declaration.ll181
-rw-r--r--llvm/test/Transforms/Coroutines/coro-lifetime-end.ll142
-rw-r--r--llvm/test/Transforms/DivRemPairs/AMDGPU/div-rem-pairs.ll129
-rw-r--r--llvm/test/Transforms/DivRemPairs/AMDGPU/lit.local.cfg2
-rw-r--r--llvm/test/Transforms/FunctionImport/funcimport.ll5
-rw-r--r--llvm/test/Transforms/InstCombine/fma.ll31
-rw-r--r--llvm/test/Transforms/InstCombine/getelementptr.ll4
-rw-r--r--llvm/test/Transforms/InstCombine/known-bits.ll38
-rw-r--r--llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll17
-rw-r--r--llvm/test/Transforms/InstCombine/vec_shuffle.ll51
-rw-r--r--llvm/test/Transforms/InstSimplify/known-non-zero.ll183
-rw-r--r--llvm/test/Transforms/InstSimplify/shufflevector.ll171
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll261
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll9
-rw-r--r--llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll3
-rw-r--r--llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll22
-rw-r--r--llvm/test/Transforms/LoopVectorize/LoongArch/loongarch-interleaved.ll39
-rw-r--r--llvm/test/Transforms/NaryReassociate/preserving-debugloc-add-mul.ll69
-rw-r--r--llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll343
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll64
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll64
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AMDGPU/crash_extract_subvector_cost.ll13
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll46
-rw-r--r--llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll129
-rw-r--r--llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll8
-rw-r--r--llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll249
-rw-r--r--llvm/test/tools/llvm-driver/symlink-call.test2
-rw-r--r--llvm/tools/llvm-cxxfilt/CMakeLists.txt4
-rw-r--r--llvm/tools/llvm-link/llvm-link.cpp6
-rw-r--r--llvm/tools/llvm-profdata/llvm-profdata.cpp21
-rw-r--r--llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp4
-rw-r--r--llvm/unittests/Analysis/ValueTrackingTest.cpp2
-rw-r--r--llvm/unittests/IR/MDBuilderTest.cpp39
-rw-r--r--llvm/unittests/Support/raw_socket_stream_test.cpp19
-rw-r--r--llvm/unittests/TargetParser/RISCVISAInfoTest.cpp4
-rw-r--r--llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp7
-rw-r--r--llvm/utils/TableGen/Common/CodeGenDAGPatterns.h2
-rw-r--r--llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h23
-rw-r--r--llvm/utils/TableGen/GlobalISelEmitter.cpp6
-rw-r--r--llvm/utils/gn/secondary/clang/lib/Analysis/FlowSensitive/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/libcxx/include/BUILD.gn2
-rwxr-xr-xllvm/utils/revert_checker.py15
-rw-r--r--mlir/include/mlir/Dialect/IRDL/IR/IRDLOps.td3
-rw-r--r--mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td2
-rw-r--r--mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h16
-rw-r--r--mlir/include/mlir/Dialect/Math/Transforms/Passes.h4
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPAttrDefs.td79
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPDialect.td22
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td211
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td48
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td245
-rw-r--r--mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td6
-rw-r--r--mlir/include/mlir/Dialect/Polynomial/IR/Polynomial.td19
-rw-r--r--mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td32
-rw-r--r--mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h9
-rw-r--r--mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp16
-rw-r--r--mlir/lib/Dialect/IRDL/IR/IRDL.cpp31
-rw-r--r--mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp93
-rw-r--r--mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp72
-rw-r--r--mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp22
-rw-r--r--mlir/lib/Dialect/Tensor/IR/TensorOps.cpp3
-rw-r--r--mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp70
-rw-r--r--mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp2
-rw-r--r--mlir/lib/Tools/mlir-opt/MlirOptMain.cpp2
-rw-r--r--mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir2
-rw-r--r--mlir/test/Conversion/VectorToArmSME/vector-to-arm-sme.mlir33
-rw-r--r--mlir/test/Dialect/Arith/int-range-interface.mlir2
-rw-r--r--mlir/test/Dialect/Arith/int-range-opts.mlir4
-rw-r--r--mlir/test/Dialect/IRDL/invalid.irdl.mlir17
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-specialize.mlir25
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_binary.mlir76
-rw-r--r--mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_unary.mlir25
-rw-r--r--mlir/test/Dialect/MemRef/canonicalize.mlir10
-rw-r--r--mlir/test/Dialect/Tensor/canonicalize.mlir12
-rw-r--r--mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir76
-rw-r--r--mlir/test/lib/Dialect/Test/TestOpDefs.cpp19
-rw-r--r--offload/plugins-nextgen/exports6
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/driver.bzl1
-rw-r--r--utils/bazel/llvm-project-overlay/mlir/BUILD.bazel5
1030 files changed, 34018 insertions, 27285 deletions
diff --git a/.ci/generate-buildkite-pipeline-premerge b/.ci/generate-buildkite-pipeline-premerge
index 78a9cb77ff7d..e1c66ac18e7a 100755
--- a/.ci/generate-buildkite-pipeline-premerge
+++ b/.ci/generate-buildkite-pipeline-premerge
@@ -68,7 +68,7 @@ function compute-projects-to-test() {
done
;;
clang)
- for p in clang-tools-extra compiler-rt flang lldb cross-project-tests; do
+ for p in clang-tools-extra compiler-rt lldb cross-project-tests; do
echo $p
done
;;
diff --git a/bolt/include/bolt/Core/BinaryContext.h b/bolt/include/bolt/Core/BinaryContext.h
index 75765819ac46..edd0f7d2365a 100644
--- a/bolt/include/bolt/Core/BinaryContext.h
+++ b/bolt/include/bolt/Core/BinaryContext.h
@@ -677,6 +677,9 @@ public:
/// have an origin file name available.
bool HasSymbolsWithFileName{false};
+ /// Does the binary have BAT section.
+ bool HasBATSection{false};
+
/// Sum of execution count of all functions
uint64_t SumExecutionCount{0};
diff --git a/bolt/include/bolt/Passes/BinaryPasses.h b/bolt/include/bolt/Passes/BinaryPasses.h
index 5d7692559eda..a07c9130041f 100644
--- a/bolt/include/bolt/Passes/BinaryPasses.h
+++ b/bolt/include/bolt/Passes/BinaryPasses.h
@@ -16,6 +16,7 @@
#include "bolt/Core/BinaryContext.h"
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Core/DynoStats.h"
+#include "bolt/Profile/BoltAddressTranslation.h"
#include "llvm/Support/CommandLine.h"
#include <atomic>
#include <set>
@@ -399,8 +400,11 @@ public:
/// Prints a list of the top 100 functions sorted by a set of
/// dyno stats categories.
class PrintProgramStats : public BinaryFunctionPass {
+ BoltAddressTranslation *BAT = nullptr;
+
public:
- explicit PrintProgramStats() : BinaryFunctionPass(false) {}
+ explicit PrintProgramStats(BoltAddressTranslation *BAT = nullptr)
+ : BinaryFunctionPass(false), BAT(BAT) {}
const char *getName() const override { return "print-stats"; }
bool shouldPrint(const BinaryFunction &) const override { return false; }
diff --git a/bolt/include/bolt/Passes/StokeInfo.h b/bolt/include/bolt/Passes/StokeInfo.h
index 76417e6a2c3b..a18c2a05d015 100644
--- a/bolt/include/bolt/Passes/StokeInfo.h
+++ b/bolt/include/bolt/Passes/StokeInfo.h
@@ -87,10 +87,10 @@ struct StokeFuncInfo {
<< "," << NumBlocks << "," << IsLoopFree << "," << NumLoops << ","
<< MaxLoopDepth << "," << HotSize << "," << TotalSize << ","
<< Score << "," << HasCall << ",\"{ ";
- for (std::string S : DefIn)
+ for (const std::string &S : DefIn)
Outfile << "%" << S << " ";
Outfile << "}\",\"{ ";
- for (std::string S : LiveOut)
+ for (const std::string &S : LiveOut)
Outfile << "%" << S << " ";
Outfile << "}\"," << HeapOut << "," << StackOut << "," << HasRipAddr
<< "," << Omitted << "\n";
diff --git a/bolt/include/bolt/Profile/DataAggregator.h b/bolt/include/bolt/Profile/DataAggregator.h
index c158a9bb3e3f..6453b3070ceb 100644
--- a/bolt/include/bolt/Profile/DataAggregator.h
+++ b/bolt/include/bolt/Profile/DataAggregator.h
@@ -15,6 +15,7 @@
#define BOLT_PROFILE_DATA_AGGREGATOR_H
#include "bolt/Profile/DataReader.h"
+#include "bolt/Profile/YAMLProfileWriter.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/Error.h"
#include "llvm/Support/Program.h"
@@ -248,7 +249,7 @@ private:
BinaryFunction *getBATParentFunction(const BinaryFunction &Func) const;
/// Retrieve the location name to be used for samples recorded in \p Func.
- StringRef getLocationName(const BinaryFunction &Func) const;
+ static StringRef getLocationName(const BinaryFunction &Func, bool BAT);
/// Semantic actions - parser hooks to interpret parsed perf samples
/// Register a sample (non-LBR mode), i.e. a new hit at \p Address
@@ -490,6 +491,8 @@ public:
/// Parse the output generated by "perf buildid-list" to extract build-ids
/// and return a file name matching a given \p FileBuildID.
std::optional<StringRef> getFileNameForBuildID(StringRef FileBuildID);
+
+ friend class YAMLProfileWriter;
};
} // namespace bolt
} // namespace llvm
diff --git a/bolt/lib/Core/BinaryContext.cpp b/bolt/lib/Core/BinaryContext.cpp
index ad2eb18caf10..64d160adeee8 100644
--- a/bolt/lib/Core/BinaryContext.cpp
+++ b/bolt/lib/Core/BinaryContext.cpp
@@ -1322,7 +1322,9 @@ void BinaryContext::processInterproceduralReferences() {
InterproceduralReferences) {
BinaryFunction &Function = *It.first;
uint64_t Address = It.second;
- if (!Address || Function.isIgnored())
+ // Process interprocedural references from ignored functions in BAT mode
+ // (non-simple in non-relocation mode) to properly register entry points
+ if (!Address || (Function.isIgnored() && !HasBATSection))
continue;
BinaryFunction *TargetFunction =
diff --git a/bolt/lib/Core/BinaryFunction.cpp b/bolt/lib/Core/BinaryFunction.cpp
index 10b93e702984..1bb05f044fc8 100644
--- a/bolt/lib/Core/BinaryFunction.cpp
+++ b/bolt/lib/Core/BinaryFunction.cpp
@@ -1666,7 +1666,8 @@ void BinaryFunction::postProcessEntryPoints() {
// In non-relocation mode there's potentially an external undetectable
// reference to the entry point and hence we cannot move this entry
// point. Optimizing without moving could be difficult.
- if (!BC.HasRelocations)
+ // In BAT mode, register any known entry points for CFG construction.
+ if (!BC.HasRelocations && !BC.HasBATSection)
setSimple(false);
const uint32_t Offset = KV.first;
diff --git a/bolt/lib/Passes/BinaryPasses.cpp b/bolt/lib/Passes/BinaryPasses.cpp
index 298ba29ff5b3..11e22dea71fb 100644
--- a/bolt/lib/Passes/BinaryPasses.cpp
+++ b/bolt/lib/Passes/BinaryPasses.cpp
@@ -1390,9 +1390,19 @@ Error PrintProgramStats::runOnFunctions(BinaryContext &BC) {
if (Function.isPLTFunction())
continue;
+ // Adjustment for BAT mode: the profile for BOLT split fragments is combined
+ // so only count the hot fragment.
+ const uint64_t Address = Function.getAddress();
+ bool IsHotParentOfBOLTSplitFunction = !Function.getFragments().empty() &&
+ BAT && BAT->isBATFunction(Address) &&
+ !BAT->fetchParentAddress(Address);
+
++NumRegularFunctions;
- if (!Function.isSimple()) {
+ // In BOLTed binaries split functions are non-simple (due to non-relocation
+ // mode), but the original function is known to be simple and we have a
+ // valid profile for it.
+ if (!Function.isSimple() && !IsHotParentOfBOLTSplitFunction) {
if (Function.hasProfile())
++NumNonSimpleProfiledFunctions;
continue;
diff --git a/bolt/lib/Profile/DataAggregator.cpp b/bolt/lib/Profile/DataAggregator.cpp
index e06debcee741..c0fd69b98c82 100644
--- a/bolt/lib/Profile/DataAggregator.cpp
+++ b/bolt/lib/Profile/DataAggregator.cpp
@@ -613,7 +613,8 @@ Error DataAggregator::readProfile(BinaryContext &BC) {
if (std::error_code EC = writeBATYAML(BC, opts::SaveProfile))
report_error("cannot create output data file", EC);
}
- BC.logBOLTErrorsAndQuitOnFatal(PrintProgramStats().runOnFunctions(BC));
+ PrintProgramStats PPS(BAT);
+ BC.logBOLTErrorsAndQuitOnFatal(PPS.runOnFunctions(BC));
}
return Error::success();
@@ -673,7 +674,8 @@ DataAggregator::getBATParentFunction(const BinaryFunction &Func) const {
return nullptr;
}
-StringRef DataAggregator::getLocationName(const BinaryFunction &Func) const {
+StringRef DataAggregator::getLocationName(const BinaryFunction &Func,
+ bool BAT) {
if (!BAT)
return Func.getOneName();
@@ -702,7 +704,7 @@ bool DataAggregator::doSample(BinaryFunction &OrigFunc, uint64_t Address,
auto I = NamesToSamples.find(Func.getOneName());
if (I == NamesToSamples.end()) {
bool Success;
- StringRef LocName = getLocationName(Func);
+ StringRef LocName = getLocationName(Func, BAT);
std::tie(I, Success) = NamesToSamples.insert(
std::make_pair(Func.getOneName(),
FuncSampleData(LocName, FuncSampleData::ContainerTy())));
@@ -722,7 +724,7 @@ bool DataAggregator::doIntraBranch(BinaryFunction &Func, uint64_t From,
FuncBranchData *AggrData = getBranchData(Func);
if (!AggrData) {
AggrData = &NamesToBranches[Func.getOneName()];
- AggrData->Name = getLocationName(Func);
+ AggrData->Name = getLocationName(Func, BAT);
setBranchData(Func, AggrData);
}
@@ -741,7 +743,7 @@ bool DataAggregator::doInterBranch(BinaryFunction *FromFunc,
StringRef SrcFunc;
StringRef DstFunc;
if (FromFunc) {
- SrcFunc = getLocationName(*FromFunc);
+ SrcFunc = getLocationName(*FromFunc, BAT);
FromAggrData = getBranchData(*FromFunc);
if (!FromAggrData) {
FromAggrData = &NamesToBranches[FromFunc->getOneName()];
@@ -752,7 +754,7 @@ bool DataAggregator::doInterBranch(BinaryFunction *FromFunc,
recordExit(*FromFunc, From, Mispreds, Count);
}
if (ToFunc) {
- DstFunc = getLocationName(*ToFunc);
+ DstFunc = getLocationName(*ToFunc, BAT);
ToAggrData = getBranchData(*ToFunc);
if (!ToAggrData) {
ToAggrData = &NamesToBranches[ToFunc->getOneName()];
@@ -1227,7 +1229,7 @@ ErrorOr<Location> DataAggregator::parseLocationOrOffset() {
if (Sep == StringRef::npos)
return parseOffset();
StringRef LookAhead = ParsingBuf.substr(0, Sep);
- if (LookAhead.find_first_of(":") == StringRef::npos)
+ if (!LookAhead.contains(':'))
return parseOffset();
ErrorOr<StringRef> BuildID = parseString(':');
@@ -2340,7 +2342,7 @@ std::error_code DataAggregator::writeBATYAML(BinaryContext &BC,
continue;
BinaryFunction *BF = BC.getBinaryFunctionAtAddress(FuncAddress);
assert(BF);
- YamlBF.Name = getLocationName(*BF);
+ YamlBF.Name = getLocationName(*BF, BAT);
YamlBF.Id = BF->getFunctionNumber();
YamlBF.Hash = BAT->getBFHash(FuncAddress);
YamlBF.ExecCount = BF->getKnownExecutionCount();
diff --git a/bolt/lib/Profile/YAMLProfileWriter.cpp b/bolt/lib/Profile/YAMLProfileWriter.cpp
index ef04ba0d21ad..cf6b61ddd603 100644
--- a/bolt/lib/Profile/YAMLProfileWriter.cpp
+++ b/bolt/lib/Profile/YAMLProfileWriter.cpp
@@ -10,6 +10,7 @@
#include "bolt/Core/BinaryBasicBlock.h"
#include "bolt/Core/BinaryFunction.h"
#include "bolt/Profile/BoltAddressTranslation.h"
+#include "bolt/Profile/DataAggregator.h"
#include "bolt/Profile/ProfileReaderBase.h"
#include "bolt/Rewrite/RewriteInstance.h"
#include "llvm/Support/CommandLine.h"
@@ -39,6 +40,10 @@ const BinaryFunction *YAMLProfileWriter::setCSIDestination(
BC.getFunctionForSymbol(Symbol, &EntryID)) {
if (BAT && BAT->isBATFunction(Callee->getAddress()))
std::tie(Callee, EntryID) = BAT->translateSymbol(BC, *Symbol, Offset);
+ else if (const BinaryBasicBlock *BB =
+ Callee->getBasicBlockContainingOffset(Offset))
+ BC.getFunctionForSymbol(Callee->getSecondaryEntryPointSymbol(*BB),
+ &EntryID);
CSI.DestId = Callee->getFunctionNumber();
CSI.EntryDiscriminator = EntryID;
return Callee;
@@ -59,7 +64,7 @@ YAMLProfileWriter::convert(const BinaryFunction &BF, bool UseDFS,
BF.computeHash(UseDFS);
BF.computeBlockHashes();
- YamlBF.Name = BF.getPrintName();
+ YamlBF.Name = DataAggregator::getLocationName(BF, BAT);
YamlBF.Id = BF.getFunctionNumber();
YamlBF.Hash = BF.getHash();
YamlBF.NumBasicBlocks = BF.size();
diff --git a/bolt/lib/Rewrite/RewriteInstance.cpp b/bolt/lib/Rewrite/RewriteInstance.cpp
index 85b39176754b..9cc4c8c8c4fa 100644
--- a/bolt/lib/Rewrite/RewriteInstance.cpp
+++ b/bolt/lib/Rewrite/RewriteInstance.cpp
@@ -1988,6 +1988,7 @@ Error RewriteInstance::readSpecialSections() {
if (ErrorOr<BinarySection &> BATSec =
BC->getUniqueSectionByName(BoltAddressTranslation::SECTION_NAME)) {
+ BC->HasBATSection = true;
// Do not read BAT when plotting a heatmap
if (!opts::HeatmapMode) {
if (std::error_code EC = BAT->parse(BC->outs(), BATSec->getContents())) {
@@ -4808,6 +4809,40 @@ void RewriteInstance::updateELFSymbolTable(
// Create a new symbol based on the existing symbol.
ELFSymTy NewSymbol = Symbol;
+ // Handle special symbols based on their name.
+ Expected<StringRef> SymbolName = Symbol.getName(StringSection);
+ assert(SymbolName && "cannot get symbol name");
+
+ auto updateSymbolValue = [&](const StringRef Name,
+ std::optional<uint64_t> Value = std::nullopt) {
+ NewSymbol.st_value = Value ? *Value : getNewValueForSymbol(Name);
+ NewSymbol.st_shndx = ELF::SHN_ABS;
+ BC->outs() << "BOLT-INFO: setting " << Name << " to 0x"
+ << Twine::utohexstr(NewSymbol.st_value) << '\n';
+ };
+
+ if (*SymbolName == "__hot_start" || *SymbolName == "__hot_end") {
+ if (opts::HotText) {
+ updateSymbolValue(*SymbolName);
+ ++NumHotTextSymsUpdated;
+ }
+ goto registerSymbol;
+ }
+
+ if (*SymbolName == "__hot_data_start" || *SymbolName == "__hot_data_end") {
+ if (opts::HotData) {
+ updateSymbolValue(*SymbolName);
+ ++NumHotDataSymsUpdated;
+ }
+ goto registerSymbol;
+ }
+
+ if (*SymbolName == "_end") {
+ if (NextAvailableAddress > Symbol.st_value)
+ updateSymbolValue(*SymbolName, NextAvailableAddress);
+ goto registerSymbol;
+ }
+
if (Function) {
// If the symbol matched a function that was not emitted, update the
// corresponding section index but otherwise leave it unchanged.
@@ -4904,33 +4939,7 @@ void RewriteInstance::updateELFSymbolTable(
}
}
- // Handle special symbols based on their name.
- Expected<StringRef> SymbolName = Symbol.getName(StringSection);
- assert(SymbolName && "cannot get symbol name");
-
- auto updateSymbolValue = [&](const StringRef Name,
- std::optional<uint64_t> Value = std::nullopt) {
- NewSymbol.st_value = Value ? *Value : getNewValueForSymbol(Name);
- NewSymbol.st_shndx = ELF::SHN_ABS;
- BC->outs() << "BOLT-INFO: setting " << Name << " to 0x"
- << Twine::utohexstr(NewSymbol.st_value) << '\n';
- };
-
- if (opts::HotText &&
- (*SymbolName == "__hot_start" || *SymbolName == "__hot_end")) {
- updateSymbolValue(*SymbolName);
- ++NumHotTextSymsUpdated;
- }
-
- if (opts::HotData && (*SymbolName == "__hot_data_start" ||
- *SymbolName == "__hot_data_end")) {
- updateSymbolValue(*SymbolName);
- ++NumHotDataSymsUpdated;
- }
-
- if (*SymbolName == "_end" && NextAvailableAddress > Symbol.st_value)
- updateSymbolValue(*SymbolName, NextAvailableAddress);
-
+ registerSymbol:
if (IsDynSym)
Write((&Symbol - cantFail(Obj.symbols(&SymTabSection)).begin()) *
sizeof(ELFSymTy),
diff --git a/bolt/runtime/instr.cpp b/bolt/runtime/instr.cpp
index 16e0bbd55f90..d1f8a216badc 100644
--- a/bolt/runtime/instr.cpp
+++ b/bolt/runtime/instr.cpp
@@ -1245,7 +1245,6 @@ void Graph::computeEdgeFrequencies(const uint64_t *Counters,
continue;
assert(SpanningTreeNodes[Cur].NumInEdges == 1, "must have 1 parent");
- const uint32_t Parent = SpanningTreeNodes[Cur].InEdges[0].Node;
const uint32_t ParentEdge = SpanningTreeNodes[Cur].InEdges[0].ID;
// Calculate parent edge freq.
@@ -1464,9 +1463,8 @@ void visitCallFlowEntry(CallFlowHashTable::MapEntry &Entry, int FD,
int openProfile() {
// Build the profile name string by appending our PID
char Buf[BufSize];
- char *Ptr = Buf;
uint64_t PID = __getpid();
- Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
+ char *Ptr = strCopy(Buf, __bolt_instr_filename, BufSize);
if (__bolt_instr_use_pid) {
Ptr = strCopy(Ptr, ".", BufSize - (Ptr - Buf + 1));
Ptr = intToStr(Ptr, PID, 10);
diff --git a/bolt/test/X86/bolt-address-translation-yaml.test b/bolt/test/X86/bolt-address-translation-yaml.test
index e21513b7dfe5..9f2c2ef3ab98 100644
--- a/bolt/test/X86/bolt-address-translation-yaml.test
+++ b/bolt/test/X86/bolt-address-translation-yaml.test
@@ -31,7 +31,8 @@ RUN: perf2bolt %t.out --pa -p %p/Inputs/blarge_new_bat.preagg.txt -w %t.yaml -o
RUN: 2>&1 | FileCheck --check-prefix READ-BAT-CHECK %s
RUN: FileCheck --input-file %t.yaml --check-prefix YAML-BAT-CHECK %s
# Check that YAML converted from fdata matches YAML created directly with BAT.
-RUN: llvm-bolt %t.exe -data %t.fdata -w %t.yaml-fdata -o /dev/null
+RUN: llvm-bolt %t.exe -data %t.fdata -w %t.yaml-fdata -o /dev/null \
+RUN: 2>&1 | FileCheck --check-prefix READ-BAT-FDATA-CHECK %s
RUN: FileCheck --input-file %t.yaml-fdata --check-prefix YAML-BAT-CHECK %s
# Test resulting YAML profile with the original binary (no-stale mode)
@@ -45,6 +46,8 @@ WRITE-BAT-CHECK: BOLT-INFO: BAT section size (bytes): 384
READ-BAT-CHECK-NOT: BOLT-ERROR: unable to save profile in YAML format for input file processed by BOLT
READ-BAT-CHECK: BOLT-INFO: Parsed 5 BAT entries
READ-BAT-CHECK: PERF2BOLT: read 79 aggregated LBR entries
+READ-BAT-CHECK: BOLT-INFO: 5 out of 21 functions in the binary (23.8%) have non-empty execution profile
+READ-BAT-FDATA-CHECK: BOLT-INFO: 5 out of 16 functions in the binary (31.2%) have non-empty execution profile
YAML-BAT-CHECK: functions:
# Function not covered by BAT - has insns in basic block
diff --git a/bolt/test/X86/ignored-interprocedural-reference.s b/bolt/test/X86/ignored-interprocedural-reference.s
new file mode 100644
index 000000000000..12e4fb92adcc
--- /dev/null
+++ b/bolt/test/X86/ignored-interprocedural-reference.s
@@ -0,0 +1,49 @@
+# This reproduces a bug with not processing interprocedural references from
+# ignored functions.
+
+# REQUIRES: system-linux
+
+# RUN: llvm-mc -filetype=obj -triple x86_64-unknown-unknown %s -o %t.o
+# RUN: %clang %cflags %t.o -o %t.exe -nostdlib -Wl,-q
+# RUN: llvm-bolt %t.exe -o %t.out --enable-bat -funcs=main
+# RUN: link_fdata %s %t.out %t.preagg PREAGG
+# RUN: perf2bolt %t.out -p %t.preagg --pa -o %t.fdata -w %t.yaml
+# RUN: FileCheck %s --input-file=%t.fdata --check-prefix=CHECK-FDATA
+# RUN: FileCheck %s --input-file=%t.yaml --check-prefix=CHECK-YAML
+
+# CHECK-FDATA: 1 main 0 1 foo a 1 1
+# CHECK-YAML: name: main
+# CHECK-YAML: calls: {{.*}} disc: 1
+
+# PREAGG: B #main# #foo_secondary# 1 1
+# main calls foo at valid instruction offset past nops that are to be stripped.
+ .globl main
+main:
+ .cfi_startproc
+ call foo_secondary
+ ret
+ .cfi_endproc
+.size main,.-main
+
+# Placeholder cold fragment to force main to be ignored in non-relocation mode.
+ .globl main.cold
+main.cold:
+ .cfi_startproc
+ ud2
+ .cfi_endproc
+.size main.cold,.-main.cold
+
+# foo is set up to contain a valid instruction at called offset, and trapping
+# instructions past that.
+ .globl foo
+foo:
+ .cfi_startproc
+ .nops 10
+ .globl foo_secondary
+foo_secondary:
+ ret
+ .rept 20
+ int3
+ .endr
+ .cfi_endproc
+.size foo,.-foo
diff --git a/bolt/test/X86/register-fragments-bolt-symbols.s b/bolt/test/X86/register-fragments-bolt-symbols.s
index 6478adf19372..90c402b2234d 100644
--- a/bolt/test/X86/register-fragments-bolt-symbols.s
+++ b/bolt/test/X86/register-fragments-bolt-symbols.s
@@ -18,6 +18,11 @@
# RUN: FileCheck --input-file %t.bat.fdata --check-prefix=CHECK-FDATA %s
# RUN: FileCheck --input-file %t.bat.yaml --check-prefix=CHECK-YAML %s
+# RUN: link_fdata --no-redefine %s %t.bolt %t.preagg2 PREAGG2
+# PREAGG2: B X:0 #chain# 1 0
+# RUN: perf2bolt %t.bolt -p %t.preagg2 --pa -o %t.bat2.fdata -w %t.bat2.yaml
+# RUN: FileCheck %s --input-file %t.bat2.yaml --check-prefix=CHECK-YAML2
+
# CHECK-SYMS: l df *ABS* [[#]] chain.s
# CHECK-SYMS: l F .bolt.org.text [[#]] chain
# CHECK-SYMS: l F .text.cold [[#]] chain.cold.0
@@ -28,6 +33,9 @@
# CHECK-FDATA: 0 [unknown] 0 1 chain/chain.s/2 10 0 1
# CHECK-YAML: - name: 'chain/chain.s/2'
+# CHECK-YAML2: - name: 'chain/chain.s/1'
+## non-BAT function has non-zero insns:
+# CHECK-YAML2: insns: 1
.file "chain.s"
.text
diff --git a/bolt/test/link_fdata.py b/bolt/test/link_fdata.py
index 0232dd3211e9..3837e394ccc8 100755
--- a/bolt/test/link_fdata.py
+++ b/bolt/test/link_fdata.py
@@ -19,6 +19,7 @@ parser.add_argument("output")
parser.add_argument("prefix", nargs="?", default="FDATA", help="Custom FDATA prefix")
parser.add_argument("--nmtool", default="nm", help="Path to nm tool")
parser.add_argument("--no-lbr", action="store_true")
+parser.add_argument("--no-redefine", action="store_true")
args = parser.parse_args()
@@ -90,6 +91,8 @@ nm_output = subprocess.run(
symbols = {}
for symline in nm_output.splitlines():
symval, _, symname = symline.split(maxsplit=2)
+ if symname in symbols and args.no_redefine:
+ continue
symbols[symname] = symval
diff --git a/bolt/test/runtime/X86/hot-end-symbol.s b/bolt/test/runtime/X86/hot-end-symbol.s
index e6d83d77167a..6ae771cead75 100755
--- a/bolt/test/runtime/X86/hot-end-symbol.s
+++ b/bolt/test/runtime/X86/hot-end-symbol.s
@@ -12,6 +12,7 @@
# RUN: %clang %cflags -no-pie %t.o -o %t.exe -Wl,-q
# RUN: llvm-bolt %t.exe --relocs=1 --hot-text --reorder-functions=hfsort \
+# RUN: --split-functions --split-strategy=all \
# RUN: --data %t.fdata -o %t.out | FileCheck %s
# RUN: %t.out 1
@@ -30,12 +31,12 @@
# CHECK-OUTPUT: __hot_start
# CHECK-OUTPUT-NEXT: main
# CHECK-OUTPUT-NEXT: __hot_end
+# CHECK-OUTPUT-NOT: __hot_start.cold
.text
.globl main
.type main, %function
.globl __hot_start
- .type __hot_start, %object
.p2align 4
main:
__hot_start:
diff --git a/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp
index 36687a8e761e..c87b3ea7e261 100644
--- a/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/ForwardingReferenceOverloadCheck.cpp
@@ -54,7 +54,9 @@ AST_MATCHER(QualType, isEnableIf) {
AST_MATCHER_P(TemplateTypeParmDecl, hasDefaultArgument,
clang::ast_matchers::internal::Matcher<QualType>, TypeMatcher) {
return Node.hasDefaultArgument() &&
- TypeMatcher.matches(Node.getDefaultArgument(), Finder, Builder);
+ TypeMatcher.matches(
+ Node.getDefaultArgument().getArgument().getAsType(), Finder,
+ Builder);
}
AST_MATCHER(TemplateDecl, hasAssociatedConstraints) {
return Node.hasAssociatedConstraints();
diff --git a/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp
index 09aaf3e31d5d..75f1107904fc 100644
--- a/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/IncorrectEnableIfCheck.cpp
@@ -19,10 +19,11 @@ namespace {
AST_MATCHER_P(TemplateTypeParmDecl, hasUnnamedDefaultArgument,
ast_matchers::internal::Matcher<TypeLoc>, InnerMatcher) {
if (Node.getIdentifier() != nullptr || !Node.hasDefaultArgument() ||
- Node.getDefaultArgumentInfo() == nullptr)
+ Node.getDefaultArgument().getArgument().isNull())
return false;
- TypeLoc DefaultArgTypeLoc = Node.getDefaultArgumentInfo()->getTypeLoc();
+ TypeLoc DefaultArgTypeLoc =
+ Node.getDefaultArgument().getTypeSourceInfo()->getTypeLoc();
return InnerMatcher.matches(DefaultArgTypeLoc, Finder, Builder);
}
diff --git a/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp
index 7a021fe14436..ea4d99586c71 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseConstraintsCheck.cpp
@@ -177,9 +177,11 @@ matchTrailingTemplateParam(const FunctionTemplateDecl *FunctionTemplate) {
dyn_cast<TemplateTypeParmDecl>(LastParam)) {
if (LastTemplateParam->hasDefaultArgument() &&
LastTemplateParam->getIdentifier() == nullptr) {
- return {matchEnableIfSpecialization(
- LastTemplateParam->getDefaultArgumentInfo()->getTypeLoc()),
- LastTemplateParam};
+ return {
+ matchEnableIfSpecialization(LastTemplateParam->getDefaultArgument()
+ .getTypeSourceInfo()
+ ->getTypeLoc()),
+ LastTemplateParam};
}
}
return {};
diff --git a/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp b/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp
index e811f5519de2..88e4886cd0df 100644
--- a/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp
+++ b/clang-tools-extra/clang-tidy/utils/RenamerClangTidyCheck.cpp
@@ -123,6 +123,9 @@ static const NamedDecl *getFailureForNamedDecl(const NamedDecl *ND) {
if (const auto *Method = dyn_cast<CXXMethodDecl>(ND)) {
if (const CXXMethodDecl *Overridden = getOverrideMethod(Method))
Canonical = cast<NamedDecl>(Overridden->getCanonicalDecl());
+ else if (const FunctionTemplateDecl *Primary = Method->getPrimaryTemplate())
+ if (const FunctionDecl *TemplatedDecl = Primary->getTemplatedDecl())
+ Canonical = cast<NamedDecl>(TemplatedDecl->getCanonicalDecl());
if (Canonical != ND)
return Canonical;
diff --git a/clang-tools-extra/clangd/Hover.cpp b/clang-tools-extra/clangd/Hover.cpp
index 06b949bc4a2b..2ec0994e846e 100644
--- a/clang-tools-extra/clangd/Hover.cpp
+++ b/clang-tools-extra/clangd/Hover.cpp
@@ -247,8 +247,12 @@ fetchTemplateParameters(const TemplateParameterList *Params,
if (!TTP->getName().empty())
P.Name = TTP->getNameAsString();
- if (TTP->hasDefaultArgument())
- P.Default = TTP->getDefaultArgument().getAsString(PP);
+ if (TTP->hasDefaultArgument()) {
+ P.Default.emplace();
+ llvm::raw_string_ostream Out(*P.Default);
+ TTP->getDefaultArgument().getArgument().print(PP, Out,
+ /*IncludeType=*/false);
+ }
} else if (const auto *NTTP = dyn_cast<NonTypeTemplateParmDecl>(Param)) {
P.Type = printType(NTTP, PP);
diff --git a/clang-tools-extra/clangd/test/infinite-instantiation.test b/clang-tools-extra/clangd/test/infinite-instantiation.test
index 85a1b656f490..a9c787c77027 100644
--- a/clang-tools-extra/clangd/test/infinite-instantiation.test
+++ b/clang-tools-extra/clangd/test/infinite-instantiation.test
@@ -1,5 +1,6 @@
-// RUN: cp %s %t.cpp
-// RUN: not clangd -check=%t.cpp 2>&1 | FileCheck -strict-whitespace %s
+// RUN: rm -rf %t.dir && mkdir -p %t.dir
+// RUN: echo '[{"directory": "%/t.dir", "command": "clang -ftemplate-depth=100 -x c++ %/s", "file": "%/s"}]' > %t.dir/compile_commands.json
+// RUN: not clangd --compile-commands-dir=%t.dir -check=%s 2>&1 | FileCheck -strict-whitespace %s
// CHECK: [template_recursion_depth_exceeded]
diff --git a/clang-tools-extra/docs/ReleaseNotes.rst b/clang-tools-extra/docs/ReleaseNotes.rst
index 6a9892bada91..741abc0a199a 100644
--- a/clang-tools-extra/docs/ReleaseNotes.rst
+++ b/clang-tools-extra/docs/ReleaseNotes.rst
@@ -375,7 +375,8 @@ Changes in existing checks
<clang-tidy/checks/readability/identifier-naming>` check in `GetConfigPerFile`
mode by resolving symbolic links to header files. Fixed handling of Hungarian
Prefix when configured to `LowerCase`. Added support for renaming designated
- initializers. Added support for renaming macro arguments.
+ initializers. Added support for renaming macro arguments. Fixed renaming
+ conflicts arising from out-of-line member function template definitions.
- Improved :doc:`readability-implicit-bool-conversion
<clang-tidy/checks/readability/implicit-bool-conversion>` check to provide
diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init-no-crash.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init-no-crash.cpp
index 300fff6cb179..2e2964dda1da 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init-no-crash.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init-no-crash.cpp
@@ -5,3 +5,11 @@ struct X {
// CHECK-MESSAGES: :[[@LINE-1]]:5: error: field has incomplete type 'X' [clang-diagnostic-error]
int a = 10;
};
+
+template <typename T> class NoCrash {
+ // CHECK-MESSAGES: :[[@LINE+2]]:20: error: base class has incomplete type
+ // CHECK-MESSAGES: :[[@LINE-2]]:29: note: definition of 'NoCrash<T>' is not complete until the closing '}'
+ class B : public NoCrash {
+ template <typename U> B(U u) {}
+ };
+};
diff --git a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init.cpp b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init.cpp
index 8d6992afef08..eaa73b906ce0 100644
--- a/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init.cpp
+++ b/clang-tools-extra/test/clang-tidy/checkers/cppcoreguidelines/pro-type-member-init.cpp
@@ -463,12 +463,6 @@ struct NegativeIncompleteArrayMember {
char e[];
};
-template <typename T> class NoCrash {
- class B : public NoCrash {
- template <typename U> B(U u) {}
- };
-};
-
struct PositiveBitfieldMember {
PositiveBitfieldMember() {}
// CHECK-MESSAGES: :[[@LINE-1]]:3: warning: constructor does not initialize these fields: F
diff --git a/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-outofline.cpp b/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-outofline.cpp
new file mode 100644
index 000000000000..f807875e2769
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/checkers/readability/identifier-naming-outofline.cpp
@@ -0,0 +1,30 @@
+// RUN: %check_clang_tidy %s readability-identifier-naming %t -std=c++20 \
+// RUN: --config='{CheckOptions: { \
+// RUN: readability-identifier-naming.MethodCase: CamelCase, \
+// RUN: }}'
+
+namespace SomeNamespace {
+namespace Inner {
+
+class SomeClass {
+public:
+ template <typename T>
+ int someMethod();
+// CHECK-MESSAGES: :[[@LINE-1]]:9: warning: invalid case style for method 'someMethod' [readability-identifier-naming]
+// CHECK-FIXES: {{^}} int SomeMethod();
+};
+template <typename T>
+int SomeClass::someMethod() {
+// CHECK-FIXES: {{^}}int SomeClass::SomeMethod() {
+ return 5;
+}
+
+} // namespace Inner
+
+void someFunc() {
+ Inner::SomeClass S;
+ S.someMethod<int>();
+// CHECK-FIXES: {{^}} S.SomeMethod<int>();
+}
+
+} // namespace SomeNamespace
diff --git a/clang/cmake/caches/HLSL.cmake b/clang/cmake/caches/HLSL.cmake
index 27f848fdccf0..ed813f60c9c6 100644
--- a/clang/cmake/caches/HLSL.cmake
+++ b/clang/cmake/caches/HLSL.cmake
@@ -12,7 +12,7 @@ set(LLVM_ENABLE_PROJECTS "clang;clang-tools-extra" CACHE STRING "")
set(CLANG_ENABLE_HLSL On CACHE BOOL "")
-if (NOT CMAKE_CONFIGURATION_TYPES)
+if (HLSL_ENABLE_DISTRIBUTION)
set(LLVM_DISTRIBUTION_COMPONENTS
"clang;hlsl-resource-headers;clangd"
CACHE STRING "")
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 5a123b0b86dd..81e9d0423f96 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -742,6 +742,13 @@ Bug Fixes to C++ Support
- Fix a bug with checking constrained non-type template parameters for equivalence. Fixes (#GH77377).
- Fix a bug where the last argument was not considered when considering the most viable function for
explicit object argument member functions. Fixes (#GH92188).
+- Fix a C++11 crash when a non-const non-static member function is defined out-of-line with
+ the ``constexpr`` specifier. Fixes (#GH61004).
+- Clang no longer transforms dependent qualified names into implicit class member access expressions
+ until it can be determined whether the name is that of a non-static member.
+- Clang now correctly diagnoses when the current instantiation is used as an incomplete base class.
+- Clang no longer treats ``constexpr`` class scope function template specializations of non-static members
+ as implicitly ``const`` in language modes after C++11.
Bug Fixes to AST Handling
^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/clang/docs/tools/clang-formatted-files.txt b/clang/docs/tools/clang-formatted-files.txt
index b747adfb6992..dee51e402b68 100644
--- a/clang/docs/tools/clang-formatted-files.txt
+++ b/clang/docs/tools/clang-formatted-files.txt
@@ -124,6 +124,7 @@ clang/include/clang/Analysis/Analyses/CFGReachabilityAnalysis.h
clang/include/clang/Analysis/Analyses/ExprMutationAnalyzer.h
clang/include/clang/Analysis/FlowSensitive/AdornedCFG.h
clang/include/clang/Analysis/FlowSensitive/ASTOps.h
+clang/include/clang/Analysis/FlowSensitive/CNFFormula.h
clang/include/clang/Analysis/FlowSensitive/DataflowAnalysis.h
clang/include/clang/Analysis/FlowSensitive/DataflowAnalysisContext.h
clang/include/clang/Analysis/FlowSensitive/DataflowEnvironment.h
@@ -621,6 +622,7 @@ clang/tools/libclang/CXCursor.h
clang/tools/scan-build-py/tests/functional/src/include/clean-one.h
clang/unittests/Analysis/CFGBuildResult.h
clang/unittests/Analysis/MacroExpansionContextTest.cpp
+clang/unittests/Analysis/FlowSensitive/CNFFormula.cpp
clang/unittests/Analysis/FlowSensitive/DataflowAnalysisContextTest.cpp
clang/unittests/Analysis/FlowSensitive/DataflowEnvironmentTest.cpp
clang/unittests/Analysis/FlowSensitive/MapLatticeTest.cpp
@@ -632,6 +634,7 @@ clang/unittests/Analysis/FlowSensitive/TestingSupport.cpp
clang/unittests/Analysis/FlowSensitive/TestingSupport.h
clang/unittests/Analysis/FlowSensitive/TestingSupportTest.cpp
clang/unittests/Analysis/FlowSensitive/TypeErasedDataflowAnalysisTest.cpp
+clang/unittests/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
clang/unittests/Analysis/FlowSensitive/WatchedLiteralsSolverTest.cpp
clang/unittests/AST/ASTImporterFixtures.cpp
clang/unittests/AST/ASTImporterFixtures.h
diff --git a/clang/include/clang/AST/ASTContext.h b/clang/include/clang/AST/ASTContext.h
index e03b11219478..2ce2b810d363 100644
--- a/clang/include/clang/AST/ASTContext.h
+++ b/clang/include/clang/AST/ASTContext.h
@@ -2611,7 +2611,7 @@ public:
///
/// \returns if this is an array type, the completely unqualified array type
/// that corresponds to it. Otherwise, returns T.getUnqualifiedType().
- QualType getUnqualifiedArrayType(QualType T, Qualifiers &Quals);
+ QualType getUnqualifiedArrayType(QualType T, Qualifiers &Quals) const;
/// Determine whether the given types are equivalent after
/// cvr-qualifiers have been removed.
diff --git a/clang/include/clang/AST/ASTNodeTraverser.h b/clang/include/clang/AST/ASTNodeTraverser.h
index bf7c204e4ad7..98db1cb57899 100644
--- a/clang/include/clang/AST/ASTNodeTraverser.h
+++ b/clang/include/clang/AST/ASTNodeTraverser.h
@@ -695,7 +695,7 @@ public:
if (const auto *TC = D->getTypeConstraint())
Visit(TC->getImmediatelyDeclaredConstraint());
if (D->hasDefaultArgument())
- Visit(D->getDefaultArgument(), SourceRange(),
+ Visit(D->getDefaultArgument().getArgument(), SourceRange(),
D->getDefaultArgStorage().getInheritedFrom(),
D->defaultArgumentWasInherited() ? "inherited from" : "previous");
}
diff --git a/clang/include/clang/AST/DeclTemplate.h b/clang/include/clang/AST/DeclTemplate.h
index f3d6a321ecf1..07b08b5ed43c 100644
--- a/clang/include/clang/AST/DeclTemplate.h
+++ b/clang/include/clang/AST/DeclTemplate.h
@@ -1185,7 +1185,7 @@ class TemplateTypeParmDecl final : public TypeDecl,
/// The default template argument, if any.
using DefArgStorage =
- DefaultArgStorage<TemplateTypeParmDecl, TypeSourceInfo *>;
+ DefaultArgStorage<TemplateTypeParmDecl, TemplateArgumentLoc *>;
DefArgStorage DefaultArgument;
TemplateTypeParmDecl(DeclContext *DC, SourceLocation KeyLoc,
@@ -1225,13 +1225,9 @@ public:
bool hasDefaultArgument() const { return DefaultArgument.isSet(); }
/// Retrieve the default argument, if any.
- QualType getDefaultArgument() const {
- return DefaultArgument.get()->getType();
- }
-
- /// Retrieves the default argument's source information, if any.
- TypeSourceInfo *getDefaultArgumentInfo() const {
- return DefaultArgument.get();
+ const TemplateArgumentLoc &getDefaultArgument() const {
+ static const TemplateArgumentLoc NoneLoc;
+ return DefaultArgument.isSet() ? *DefaultArgument.get() : NoneLoc;
}
/// Retrieves the location of the default argument declaration.
@@ -1244,9 +1240,8 @@ public:
}
/// Set the default argument for this template parameter.
- void setDefaultArgument(TypeSourceInfo *DefArg) {
- DefaultArgument.set(DefArg);
- }
+ void setDefaultArgument(const ASTContext &C,
+ const TemplateArgumentLoc &DefArg);
/// Set that this default argument was inherited from another
/// parameter.
diff --git a/clang/include/clang/AST/ExprCXX.h b/clang/include/clang/AST/ExprCXX.h
index fac65628ffed..dbf693611a7f 100644
--- a/clang/include/clang/AST/ExprCXX.h
+++ b/clang/include/clang/AST/ExprCXX.h
@@ -4377,15 +4377,21 @@ class PackIndexingExpr final
// The pack being indexed, followed by the index
Stmt *SubExprs[2];
- size_t TransformedExpressions;
+ // The size of the trailing expressions.
+ unsigned TransformedExpressions : 31;
+
+ LLVM_PREFERRED_TYPE(bool)
+ unsigned ExpandedToEmptyPack : 1;
PackIndexingExpr(QualType Type, SourceLocation EllipsisLoc,
SourceLocation RSquareLoc, Expr *PackIdExpr, Expr *IndexExpr,
- ArrayRef<Expr *> SubstitutedExprs = {})
+ ArrayRef<Expr *> SubstitutedExprs = {},
+ bool ExpandedToEmptyPack = false)
: Expr(PackIndexingExprClass, Type, VK_LValue, OK_Ordinary),
EllipsisLoc(EllipsisLoc), RSquareLoc(RSquareLoc),
SubExprs{PackIdExpr, IndexExpr},
- TransformedExpressions(SubstitutedExprs.size()) {
+ TransformedExpressions(SubstitutedExprs.size()),
+ ExpandedToEmptyPack(ExpandedToEmptyPack) {
auto *Exprs = getTrailingObjects<Expr *>();
std::uninitialized_copy(SubstitutedExprs.begin(), SubstitutedExprs.end(),
@@ -4408,10 +4414,14 @@ public:
SourceLocation EllipsisLoc,
SourceLocation RSquareLoc, Expr *PackIdExpr,
Expr *IndexExpr, std::optional<int64_t> Index,
- ArrayRef<Expr *> SubstitutedExprs = {});
+ ArrayRef<Expr *> SubstitutedExprs = {},
+ bool ExpandedToEmptyPack = false);
static PackIndexingExpr *CreateDeserialized(ASTContext &Context,
unsigned NumTransformedExprs);
+ /// Determine if the expression was expanded to empty.
+ bool expandsToEmptyPack() const { return ExpandedToEmptyPack; }
+
/// Determine the location of the 'sizeof' keyword.
SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
@@ -4445,6 +4455,7 @@ public:
return getTrailingObjects<Expr *>()[*Index];
}
+ /// Return the trailing expressions, regardless of the expansion.
ArrayRef<Expr *> getExpressions() const {
return {getTrailingObjects<Expr *>(), TransformedExpressions};
}
diff --git a/clang/include/clang/AST/OpenACCClause.h b/clang/include/clang/AST/OpenACCClause.h
index 607a2b9d6536..28ff8c44bd25 100644
--- a/clang/include/clang/AST/OpenACCClause.h
+++ b/clang/include/clang/AST/OpenACCClause.h
@@ -677,6 +677,35 @@ public:
ArrayRef<Expr *> VarList, SourceLocation EndLoc);
};
+class OpenACCReductionClause final
+ : public OpenACCClauseWithVarList,
+ public llvm::TrailingObjects<OpenACCReductionClause, Expr *> {
+ OpenACCReductionOperator Op;
+
+ OpenACCReductionClause(SourceLocation BeginLoc, SourceLocation LParenLoc,
+ OpenACCReductionOperator Operator,
+ ArrayRef<Expr *> VarList, SourceLocation EndLoc)
+ : OpenACCClauseWithVarList(OpenACCClauseKind::Reduction, BeginLoc,
+ LParenLoc, EndLoc),
+ Op(Operator) {
+ std::uninitialized_copy(VarList.begin(), VarList.end(),
+ getTrailingObjects<Expr *>());
+ setExprs(MutableArrayRef(getTrailingObjects<Expr *>(), VarList.size()));
+ }
+
+public:
+ static bool classof(const OpenACCClause *C) {
+ return C->getClauseKind() == OpenACCClauseKind::Reduction;
+ }
+
+ static OpenACCReductionClause *
+ Create(const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
+ OpenACCReductionOperator Operator, ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc);
+
+ OpenACCReductionOperator getReductionOp() const { return Op; }
+};
+
template <class Impl> class OpenACCClauseVisitor {
Impl &getDerived() { return static_cast<Impl &>(*this); }
diff --git a/clang/include/clang/AST/RecursiveASTVisitor.h b/clang/include/clang/AST/RecursiveASTVisitor.h
index f5cefedb07e0..659e4cdd1037 100644
--- a/clang/include/clang/AST/RecursiveASTVisitor.h
+++ b/clang/include/clang/AST/RecursiveASTVisitor.h
@@ -1960,7 +1960,7 @@ DEF_TRAVERSE_DECL(TemplateTypeParmDecl, {
TRY_TO(TraverseType(QualType(D->getTypeForDecl(), 0)));
TRY_TO(TraverseTemplateTypeParamDeclConstraints(D));
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
- TRY_TO(TraverseTypeLoc(D->getDefaultArgumentInfo()->getTypeLoc()));
+ TRY_TO(TraverseTemplateArgumentLoc(D->getDefaultArgument()));
})
DEF_TRAVERSE_DECL(TypedefDecl, {
diff --git a/clang/include/clang/AST/Type.h b/clang/include/clang/AST/Type.h
index da3834f19ca0..9a5c6e8d562c 100644
--- a/clang/include/clang/AST/Type.h
+++ b/clang/include/clang/AST/Type.h
@@ -2523,6 +2523,7 @@ public:
bool isVectorType() const; // GCC vector type.
bool isExtVectorType() const; // Extended vector type.
bool isExtVectorBoolType() const; // Extended vector type with bool element.
+ bool isSubscriptableVectorType() const;
bool isMatrixType() const; // Matrix type.
bool isConstantMatrixType() const; // Constant matrix type.
bool isDependentAddressSpaceType() const; // value-dependent address space qualifier
@@ -7729,6 +7730,10 @@ inline bool Type::isExtVectorBoolType() const {
return cast<ExtVectorType>(CanonicalType)->getElementType()->isBooleanType();
}
+inline bool Type::isSubscriptableVectorType() const {
+ return isVectorType() || isSveVLSBuiltinType();
+}
+
inline bool Type::isMatrixType() const {
return isa<MatrixType>(CanonicalType);
}
diff --git a/clang/include/clang/Analysis/FlowSensitive/CNFFormula.h b/clang/include/clang/Analysis/FlowSensitive/CNFFormula.h
new file mode 100644
index 000000000000..fb13e774c67f
--- /dev/null
+++ b/clang/include/clang/Analysis/FlowSensitive/CNFFormula.h
@@ -0,0 +1,179 @@
+//===- CNFFormula.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A representation of a boolean formula in 3-CNF.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CNFFORMULA_H
+#define LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CNFFORMULA_H
+
+#include <cstdint>
+#include <vector>
+
+#include "clang/Analysis/FlowSensitive/Formula.h"
+
+namespace clang {
+namespace dataflow {
+
+/// Boolean variables are represented as positive integers.
+using Variable = uint32_t;
+
+/// A null boolean variable is used as a placeholder in various data structures
+/// and algorithms.
+constexpr Variable NullVar = 0;
+
+/// Literals are represented as positive integers. Specifically, for a boolean
+/// variable `V` that is represented as the positive integer `I`, the positive
+/// literal `V` is represented as the integer `2*I` and the negative literal
+/// `!V` is represented as the integer `2*I+1`.
+using Literal = uint32_t;
+
+/// A null literal is used as a placeholder in various data structures and
+/// algorithms.
+constexpr Literal NullLit = 0;
+
+/// Clause identifiers are represented as positive integers.
+using ClauseID = uint32_t;
+
+/// A null clause identifier is used as a placeholder in various data structures
+/// and algorithms.
+constexpr ClauseID NullClause = 0;
+
+/// Returns the positive literal `V`.
+inline constexpr Literal posLit(Variable V) { return 2 * V; }
+
+/// Returns the negative literal `!V`.
+inline constexpr Literal negLit(Variable V) { return 2 * V + 1; }
+
+/// Returns whether `L` is a positive literal.
+inline constexpr bool isPosLit(Literal L) { return 0 == (L & 1); }
+
+/// Returns whether `L` is a negative literal.
+inline constexpr bool isNegLit(Literal L) { return 1 == (L & 1); }
+
+/// Returns the negated literal `!L`.
+inline constexpr Literal notLit(Literal L) { return L ^ 1; }
+
+/// Returns the variable of `L`.
+inline constexpr Variable var(Literal L) { return L >> 1; }
+
+/// A boolean formula in 3-CNF (conjunctive normal form with at most 3 literals
+/// per clause).
+class CNFFormula {
+ /// `LargestVar` is equal to the largest positive integer that represents a
+ /// variable in the formula.
+ const Variable LargestVar;
+
+ /// Literals of all clauses in the formula.
+ ///
+ /// The element at index 0 stands for the literal in the null clause. It is
+ /// set to 0 and isn't used. Literals of clauses in the formula start from the
+ /// element at index 1.
+ ///
+ /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
+ /// `Clauses` will be `[0, L1, L2, L2, L3, L4]`.
+ std::vector<Literal> Clauses;
+
+ /// Start indices of clauses of the formula in `Clauses`.
+ ///
+ /// The element at index 0 stands for the start index of the null clause. It
+ /// is set to 0 and isn't used. Start indices of clauses in the formula start
+ /// from the element at index 1.
+ ///
+ /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
+ /// `ClauseStarts` will be `[0, 1, 3]`. Note that the literals of the first
+ /// clause always start at index 1. The start index for the literals of the
+ /// second clause depends on the size of the first clause and so on.
+ std::vector<size_t> ClauseStarts;
+
+ /// Indicates that we already know the formula is unsatisfiable.
+ /// During construction, we catch simple cases of conflicting unit-clauses.
+ bool KnownContradictory;
+
+public:
+ explicit CNFFormula(Variable LargestVar);
+
+ /// Adds the `L1 v ... v Ln` clause to the formula.
+ /// Requirements:
+ ///
+ /// `Li` must not be `NullLit`.
+ ///
+ /// All literals in the input that are not `NullLit` must be distinct.
+ void addClause(ArrayRef<Literal> lits);
+
+ /// Returns whether the formula is known to be contradictory.
+ /// This is the case if any of the clauses is empty.
+ bool knownContradictory() const { return KnownContradictory; }
+
+ /// Returns the largest variable in the formula.
+ Variable largestVar() const { return LargestVar; }
+
+ /// Returns the number of clauses in the formula.
+ /// Valid clause IDs are in the range [1, `numClauses()`].
+ ClauseID numClauses() const { return ClauseStarts.size() - 1; }
+
+ /// Returns the number of literals in clause `C`.
+ size_t clauseSize(ClauseID C) const {
+ return C == ClauseStarts.size() - 1 ? Clauses.size() - ClauseStarts[C]
+ : ClauseStarts[C + 1] - ClauseStarts[C];
+ }
+
+ /// Returns the literals of clause `C`.
+ /// If `knownContradictory()` is false, each clause has at least one literal.
+ llvm::ArrayRef<Literal> clauseLiterals(ClauseID C) const {
+ size_t S = clauseSize(C);
+ if (S == 0)
+ return llvm::ArrayRef<Literal>();
+ return llvm::ArrayRef<Literal>(&Clauses[ClauseStarts[C]], S);
+ }
+
+ /// An iterator over all literals of all clauses in the formula.
+ /// The iterator allows mutation of the literal through the `*` operator.
+ /// This is to support solvers that mutate the formula during solving.
+ class Iterator {
+ friend class CNFFormula;
+ CNFFormula *CNF;
+ size_t Idx;
+ Iterator(CNFFormula *CNF, size_t Idx) : CNF(CNF), Idx(Idx) {}
+
+ public:
+ Iterator(const Iterator &) = default;
+ Iterator &operator=(const Iterator &) = default;
+
+ Iterator &operator++() {
+ ++Idx;
+ assert(Idx < CNF->Clauses.size() && "Iterator out of bounds");
+ return *this;
+ }
+
+ Iterator next() const {
+ Iterator I = *this;
+ ++I;
+ return I;
+ }
+
+ Literal &operator*() const { return CNF->Clauses[Idx]; }
+ };
+ friend class Iterator;
+
+ /// Returns an iterator to the first literal of clause `C`.
+ Iterator startOfClause(ClauseID C) { return Iterator(this, ClauseStarts[C]); }
+};
+
+/// Converts the conjunction of `Vals` into a formula in conjunctive normal
+/// form where each clause has at least one and at most three literals.
+/// `Atomics` is populated with a mapping from `Variables` to the corresponding
+/// `Atom`s for atomic booleans in the input formulas.
+CNFFormula buildCNF(const llvm::ArrayRef<const Formula *> &Formulas,
+ llvm::DenseMap<Variable, Atom> &Atomics);
+
+} // namespace dataflow
+} // namespace clang
+
+#endif // LLVM_CLANG_ANALYSIS_FLOWSENSITIVE_CNFFORMULA_H
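The new CNFFormula.h header above encodes a variable V as the literal 2*V and its negation as 2*V+1, and stores all clause literals in one flat vector indexed by ClauseStarts. The following sketch is illustrative only and is not part of the commit; it assumes the header is on the include path and that the accompanying CNFFormula.cpp from this change is linked in.

// Illustrative sketch only -- not part of this commit.
#include "clang/Analysis/FlowSensitive/CNFFormula.h"
#include <cassert>

using namespace clang::dataflow;

int main() {
  // For variable V, posLit(V) == 2*V and negLit(V) == 2*V + 1, so negation
  // is a single bit flip and var() recovers the variable by shifting.
  Variable V1 = 1, V2 = 2, V3 = 3;
  assert(posLit(V1) == 2 && negLit(V1) == 3);
  assert(notLit(posLit(V2)) == negLit(V2));
  assert(var(negLit(V3)) == V3 && isNegLit(negLit(V3)));

  // Build (V1 v !V2) ^ (V2 v V3). Clause IDs start at 1; index 0 is the
  // null-clause placeholder described in the header comments.
  CNFFormula CNF(/*LargestVar=*/V3);
  CNF.addClause({posLit(V1), negLit(V2)});
  CNF.addClause({posLit(V2), posLit(V3)});
  assert(CNF.numClauses() == 2);
  assert(CNF.clauseLiterals(2).size() == 2);
  return 0;
}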
diff --git a/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h b/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
index b5cd7aa10fd7..d74380b78e93 100644
--- a/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
+++ b/clang/include/clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h
@@ -17,16 +17,17 @@
#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/Solver.h"
#include "llvm/ADT/ArrayRef.h"
-#include <limits>
namespace clang {
namespace dataflow {
/// A SAT solver that is an implementation of Algorithm D from Knuth's The Art
/// of Computer Programming Volume 4: Satisfiability, Fascicle 6. It is based on
-/// the Davis-Putnam-Logemann-Loveland (DPLL) algorithm, keeps references to a
-/// single "watched" literal per clause, and uses a set of "active" variables
+/// the Davis-Putnam-Logemann-Loveland (DPLL) algorithm [1], keeps references to
+/// a single "watched" literal per clause, and uses a set of "active" variables
/// for unit propagation.
+//
+// [1] https://en.wikipedia.org/wiki/DPLL_algorithm
class WatchedLiteralsSolver : public Solver {
// Count of the iterations of the main loop of the solver. This spans *all*
// calls to the underlying solver across the life of this object. It is
diff --git a/clang/include/clang/Basic/DiagnosticDriverKinds.td b/clang/include/clang/Basic/DiagnosticDriverKinds.td
index 9d97a75f696f..50d3b42c0f86 100644
--- a/clang/include/clang/Basic/DiagnosticDriverKinds.td
+++ b/clang/include/clang/Basic/DiagnosticDriverKinds.td
@@ -599,9 +599,6 @@ def warn_drv_unsupported_gpopt : Warning<
"ignoring '-mgpopt' option as it cannot be used with %select{|the implicit"
" usage of }0-mabicalls">,
InGroup<UnsupportedGPOpt>;
-def warn_drv_unsupported_tocdata: Warning<
- "ignoring '-mtocdata' as it is only supported for -mcmodel=small">,
- InGroup<OptionIgnored>;
def warn_drv_unsupported_sdata : Warning<
"ignoring '-msmall-data-limit=' with -mcmodel=large for -fpic or RV64">,
InGroup<OptionIgnored>;
diff --git a/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td b/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td
index 674742431dcb..944b2a38b6e9 100644
--- a/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td
+++ b/clang/include/clang/Basic/DiagnosticInstallAPIKinds.td
@@ -24,7 +24,7 @@ def err_no_matching_target : Error<"no matching target found for target variant
def err_unsupported_vendor : Error<"vendor '%0' is not supported: '%1'">;
def err_unsupported_environment : Error<"environment '%0' is not supported: '%1'">;
def err_unsupported_os : Error<"os '%0' is not supported: '%1'">;
-def err_cannot_read_input_list : Error<"could not read %select{alias list|filelist}0 '%1': %2">;
+def err_cannot_read_input_list : Error<"could not read %0 input list '%1': %2">;
def err_invalid_label: Error<"label '%0' is reserved: use a different label name for -X<label>">;
} // end of command line category.
diff --git a/clang/include/clang/Basic/DiagnosticSemaKinds.td b/clang/include/clang/Basic/DiagnosticSemaKinds.td
index e3b4186f1b06..5a32463763aa 100644
--- a/clang/include/clang/Basic/DiagnosticSemaKinds.td
+++ b/clang/include/clang/Basic/DiagnosticSemaKinds.td
@@ -310,7 +310,7 @@ def err_invalid_vector_long_double_decl_spec : Error<
def err_invalid_vector_complex_decl_spec : Error<
"cannot use '_Complex' with '__vector'">;
def warn_vector_long_decl_spec_combination : Warning<
- "Use of 'long' with '__vector' is deprecated">, InGroup<Deprecated>;
+ "use of 'long' with '__vector' is deprecated">, InGroup<Deprecated>;
def err_redeclaration_different_type : Error<
"redeclaration of %0 with a different type%diff{: $ vs $|}1,2">;
@@ -3975,7 +3975,7 @@ def warn_acquired_before : Warning<
"%0 '%1' must be acquired before '%2'">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
def warn_acquired_before_after_cycle : Warning<
- "Cycle in acquired_before/after dependencies, starting with '%0'">,
+ "cycle in acquired_before/after dependencies, starting with '%0'">,
InGroup<ThreadSafetyAnalysis>, DefaultIgnore;
@@ -8003,9 +8003,9 @@ def warn_deprecated_altivec_src_compat : Warning<
InGroup<DiagGroup<"deprecated-altivec-src-compat">>;
def warn_deprecated_lax_vec_conv_all : Warning<
- "Implicit conversion between vector types ('%0' and '%1') is deprecated. "
- "In the future, the behavior implied by '-fno-lax-vector-conversions' "
- "will be the default.">,
+ "implicit conversion between vector types ('%0' and '%1') is deprecated; "
+ "in the future, the behavior implied by '-fno-lax-vector-conversions' "
+ "will be the default">,
InGroup<DiagGroup<"deprecate-lax-vec-conv-all">>;
def err_catch_incomplete_ptr : Error<
@@ -8853,7 +8853,7 @@ def err_atomic_exclusive_builtin_pointer_size : Error<
"address argument to load or store exclusive builtin must be a pointer to"
" 1,2,4 or 8 byte type (%0 invalid)">;
def err_atomic_builtin_ext_int_size : Error<
- "Atomic memory operand must have a power-of-two size">;
+ "atomic memory operand must have a power-of-two size">;
def err_atomic_builtin_bit_int_prohibit : Error<
"argument to atomic builtin of type '_BitInt' is not supported">;
def err_atomic_op_needs_atomic : Error<
@@ -9464,8 +9464,6 @@ def err_static_data_member_not_allowed_in_local_class : Error<
def err_base_clause_on_union : Error<"unions cannot have base classes">;
def err_base_must_be_class : Error<"base specifier must name a class">;
def err_union_as_base_class : Error<"unions cannot be base classes">;
-def err_circular_inheritance : Error<
- "circular inheritance between %0 and %1">;
def err_base_class_has_flexible_array_member : Error<
"base class %0 has a flexible array member">;
def err_incomplete_base_class : Error<"base class has incomplete type">;
@@ -12345,7 +12343,8 @@ def err_acc_num_gangs_num_args
"provided}0">;
def err_acc_not_a_var_ref
: Error<"OpenACC variable is not a valid variable name, sub-array, array "
- "element, or composite variable member">;
+ "element,%select{| member of a composite variable,}0 or composite "
+ "variable member">;
def err_acc_typecheck_subarray_value
: Error<"OpenACC sub-array subscripted value is not an array or pointer">;
def err_acc_subarray_function_type
@@ -12376,5 +12375,18 @@ def note_acc_expected_pointer_var : Note<"expected variable of pointer type">;
def err_acc_clause_after_device_type
: Error<"OpenACC clause '%0' may not follow a '%1' clause in a "
"compute construct">;
-
+def err_acc_reduction_num_gangs_conflict
+ : Error<
+ "OpenACC 'reduction' clause may not appear on a 'parallel' construct "
+ "with a 'num_gangs' clause with more than 1 argument, have %0">;
+def err_acc_reduction_type
+ : Error<"OpenACC 'reduction' variable must be of scalar type, sub-array, or a "
+ "composite of scalar types;%select{| sub-array base}1 type is %0">;
+def err_acc_reduction_composite_type
+ : Error<"OpenACC 'reduction' variable must be a composite of scalar types; "
+ "%1 %select{is not a class or struct|is incomplete|is not an "
+ "aggregate}0">;
+def err_acc_reduction_composite_member_type : Error<
+ "OpenACC 'reduction' composite variable must not have non-scalar field">;
+def note_acc_reduction_composite_member_loc : Note<"invalid field is here">;
} // end of sema component.
diff --git a/clang/include/clang/Basic/OpenACCClauses.def b/clang/include/clang/Basic/OpenACCClauses.def
index 7ecc51799468..3e464abaafd9 100644
--- a/clang/include/clang/Basic/OpenACCClauses.def
+++ b/clang/include/clang/Basic/OpenACCClauses.def
@@ -46,6 +46,7 @@ VISIT_CLAUSE(NumGangs)
VISIT_CLAUSE(NumWorkers)
VISIT_CLAUSE(Present)
VISIT_CLAUSE(Private)
+VISIT_CLAUSE(Reduction)
VISIT_CLAUSE(Self)
VISIT_CLAUSE(VectorLength)
VISIT_CLAUSE(Wait)
diff --git a/clang/include/clang/Basic/OpenACCKinds.h b/clang/include/clang/Basic/OpenACCKinds.h
index 0e38a04e7164..7b9d619a8aec 100644
--- a/clang/include/clang/Basic/OpenACCKinds.h
+++ b/clang/include/clang/Basic/OpenACCKinds.h
@@ -514,6 +514,42 @@ enum class OpenACCReductionOperator {
/// Invalid Reduction Clause Kind.
Invalid,
};
+
+template <typename StreamTy>
+inline StreamTy &printOpenACCReductionOperator(StreamTy &Out,
+ OpenACCReductionOperator Op) {
+ switch (Op) {
+ case OpenACCReductionOperator::Addition:
+ return Out << "+";
+ case OpenACCReductionOperator::Multiplication:
+ return Out << "*";
+ case OpenACCReductionOperator::Max:
+ return Out << "max";
+ case OpenACCReductionOperator::Min:
+ return Out << "min";
+ case OpenACCReductionOperator::BitwiseAnd:
+ return Out << "&";
+ case OpenACCReductionOperator::BitwiseOr:
+ return Out << "|";
+ case OpenACCReductionOperator::BitwiseXOr:
+ return Out << "^";
+ case OpenACCReductionOperator::And:
+ return Out << "&&";
+ case OpenACCReductionOperator::Or:
+ return Out << "||";
+ case OpenACCReductionOperator::Invalid:
+ return Out << "<invalid>";
+ }
+ llvm_unreachable("Unknown reduction operator kind");
+}
+inline const StreamingDiagnostic &operator<<(const StreamingDiagnostic &Out,
+ OpenACCReductionOperator Op) {
+ return printOpenACCReductionOperator(Out, Op);
+}
+inline llvm::raw_ostream &operator<<(llvm::raw_ostream &Out,
+ OpenACCReductionOperator Op) {
+ return printOpenACCReductionOperator(Out, Op);
+}
} // namespace clang
#endif // LLVM_CLANG_BASIC_OPENACCKINDS_H
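The streaming overloads above let an OpenACCReductionOperator be printed directly into both diagnostics and raw_ostream-based dumpers; the clause printer later in this patch relies on this to emit text such as reduction(+: x). A minimal standalone sketch of the same forwarding pattern, using illustrative names rather than the clang types:

#include <iostream>
#include <sstream>
#include <string>

enum class ReductionOp { Add, Mul, Max };

// One template holds the formatting table; it works for any stream type
// that supports operator<< on const char*.
template <typename StreamTy>
StreamTy &printReductionOp(StreamTy &Out, ReductionOp Op) {
  switch (Op) {
  case ReductionOp::Add:
    return Out << "+";
  case ReductionOp::Mul:
    return Out << "*";
  case ReductionOp::Max:
    return Out << "max";
  }
  return Out; // unreachable; silences "control reaches end" warnings
}

// A thin overload forwards to the template, mirroring the pattern above.
std::ostream &operator<<(std::ostream &Out, ReductionOp Op) {
  return printReductionOp(Out, Op);
}

int main() {
  std::ostringstream OS;
  OS << "reduction(" << ReductionOp::Add << ": x)";
  std::cout << OS.str() << "\n"; // prints: reduction(+: x)
}

Keeping the switch in one template means the operator table is written once and each stream type only needs a thin forwarding overload, which is the shape the patch uses for StreamingDiagnostic and llvm::raw_ostream.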
diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h
index 74893f23210c..274b45d1bc58 100644
--- a/clang/include/clang/Format/Format.h
+++ b/clang/include/clang/Format/Format.h
@@ -5239,7 +5239,7 @@ tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
/// Returns the replacements corresponding to applying and formatting
/// \p Replaces on success; otherwise, return an llvm::Error carrying
/// llvm::StringError.
-llvm::Expected<tooling::Replacements>
+Expected<tooling::Replacements>
formatReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style);
@@ -5256,7 +5256,7 @@ formatReplacements(StringRef Code, const tooling::Replacements &Replaces,
/// The include manipulation is done via ``tooling::HeaderInclude``, see its
/// documentation for more details on how include insertion points are found and
/// what edits are produced.
-llvm::Expected<tooling::Replacements>
+Expected<tooling::Replacements>
cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style);
@@ -5381,11 +5381,10 @@ extern const char *DefaultFallbackStyle;
/// \returns FormatStyle as specified by ``StyleName``. If ``StyleName`` is
/// "file" and no file is found, returns ``FallbackStyle``. If no style could be
/// determined, returns an Error.
-llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
- StringRef FallbackStyle,
- StringRef Code = "",
- llvm::vfs::FileSystem *FS = nullptr,
- bool AllowUnknownOptions = false);
+Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
+ StringRef FallbackStyle, StringRef Code = "",
+ llvm::vfs::FileSystem *FS = nullptr,
+ bool AllowUnknownOptions = false);
// Guesses the language from the ``FileName`` and ``Code`` to be formatted.
// Defaults to FormatStyle::LK_Cpp.
diff --git a/clang/include/clang/Parse/Parser.h b/clang/include/clang/Parse/Parser.h
index 5f04664141d2..cc6d93384f80 100644
--- a/clang/include/clang/Parse/Parser.h
+++ b/clang/include/clang/Parse/Parser.h
@@ -2814,7 +2814,7 @@ private:
SourceLocation CorrectLocation);
void stripTypeAttributesOffDeclSpec(ParsedAttributes &Attrs, DeclSpec &DS,
- Sema::TagUseKind TUK);
+ TagUseKind TUK);
// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributes &Attrs,
@@ -3686,9 +3686,9 @@ private:
using OpenACCVarParseResult = std::pair<ExprResult, OpenACCParseCanContinue>;
/// Parses a single variable in a variable list for OpenACC.
- OpenACCVarParseResult ParseOpenACCVar();
+ OpenACCVarParseResult ParseOpenACCVar(OpenACCClauseKind CK);
/// Parses the variable list for the variety of places that take a var-list.
- llvm::SmallVector<Expr *> ParseOpenACCVarList();
+ llvm::SmallVector<Expr *> ParseOpenACCVarList(OpenACCClauseKind CK);
/// Parses any parameters for an OpenACC Clause, including required/optional
/// parens.
OpenACCClauseParseResult
diff --git a/clang/include/clang/Sema/Sema.h b/clang/include/clang/Sema/Sema.h
index 6c89d275215d..39da728c616c 100644
--- a/clang/include/clang/Sema/Sema.h
+++ b/clang/include/clang/Sema/Sema.h
@@ -174,6 +174,7 @@ class SemaHLSL;
class SemaObjC;
class SemaOpenACC;
class SemaOpenMP;
+class SemaPseudoObject;
class SemaSYCL;
class StandardConversionSequence;
class Stmt;
@@ -446,6 +447,13 @@ enum class CheckedConversionKind {
ForBuiltinOverloadedOp
};
+enum class TagUseKind {
+ Reference, // Reference to a tag: 'struct foo *X;'
+ Declaration, // Fwd decl of a tag: 'struct foo;'
+ Definition, // Definition of a tag: 'struct foo { int X; } Y;'
+ Friend // Friend declaration: 'friend struct foo;'
+};
+
/// Sema - This implements semantic analysis and AST building for C.
/// \nosubgrouping
class Sema final : public SemaBase {
@@ -471,20 +479,19 @@ class Sema final : public SemaBase {
// 18. Name Lookup (SemaLookup.cpp)
// 19. Modules (SemaModule.cpp)
// 20. C++ Overloading (SemaOverload.cpp)
- // 21. Pseudo-Object (SemaPseudoObject.cpp)
- // 22. Statements (SemaStmt.cpp)
- // 23. `inline asm` Statement (SemaStmtAsm.cpp)
- // 24. Statement Attribute Handling (SemaStmtAttr.cpp)
- // 25. C++ Templates (SemaTemplate.cpp)
- // 26. C++ Template Argument Deduction (SemaTemplateDeduction.cpp)
- // 27. C++ Template Instantiation (SemaTemplateInstantiate.cpp)
- // 28. C++ Template Declaration Instantiation
+ // 21. Statements (SemaStmt.cpp)
+ // 22. `inline asm` Statement (SemaStmtAsm.cpp)
+ // 23. Statement Attribute Handling (SemaStmtAttr.cpp)
+ // 24. C++ Templates (SemaTemplate.cpp)
+ // 25. C++ Template Argument Deduction (SemaTemplateDeduction.cpp)
+ // 26. C++ Template Instantiation (SemaTemplateInstantiate.cpp)
+ // 27. C++ Template Declaration Instantiation
// (SemaTemplateInstantiateDecl.cpp)
- // 29. C++ Variadic Templates (SemaTemplateVariadic.cpp)
- // 30. Constraints and Concepts (SemaConcept.cpp)
- // 31. Types (SemaType.cpp)
- // 32. FixIt Helpers (SemaFixItUtils.cpp)
- // 33. Name Lookup for RISC-V Vector Intrinsic (SemaRISCVVectorLookup.cpp)
+ // 28. C++ Variadic Templates (SemaTemplateVariadic.cpp)
+ // 29. Constraints and Concepts (SemaConcept.cpp)
+ // 30. Types (SemaType.cpp)
+ // 31. FixIt Helpers (SemaFixItUtils.cpp)
+ // 32. Name Lookup for RISC-V Vector Intrinsic (SemaRISCVVectorLookup.cpp)
/// \name Semantic Analysis
/// Implementations are in Sema.cpp
@@ -1015,6 +1022,11 @@ public:
return *OpenMPPtr;
}
+ SemaPseudoObject &PseudoObject() {
+ assert(PseudoObjectPtr);
+ return *PseudoObjectPtr;
+ }
+
SemaSYCL &SYCL() {
assert(SYCLPtr);
return *SYCLPtr;
@@ -1056,6 +1068,7 @@ private:
std::unique_ptr<SemaObjC> ObjCPtr;
std::unique_ptr<SemaOpenACC> OpenACCPtr;
std::unique_ptr<SemaOpenMP> OpenMPPtr;
+ std::unique_ptr<SemaPseudoObject> PseudoObjectPtr;
std::unique_ptr<SemaSYCL> SYCLPtr;
///@}
@@ -3162,13 +3175,6 @@ public:
bool isDefinition, SourceLocation NewTagLoc,
const IdentifierInfo *Name);
- enum TagUseKind {
- TUK_Reference, // Reference to a tag: 'struct foo *X;'
- TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
- TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
- TUK_Friend // Friend declaration: 'friend struct foo;'
- };
-
enum OffsetOfKind {
// Not parsing a type within __builtin_offsetof.
OOK_Outside,
@@ -5375,11 +5381,9 @@ public:
bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R,
bool HasTrailingLParen);
- ExprResult
- BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
- const DeclarationNameInfo &NameInfo,
- bool IsAddressOfOperand, const Scope *S,
- TypeSourceInfo **RecoveryTSI = nullptr);
+ ExprResult BuildQualifiedDeclarationNameExpr(
+ CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
+ bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R,
bool NeedsADL,
@@ -6371,6 +6375,8 @@ public:
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
+ void maybeExtendBlockObject(ExprResult &E);
+
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
@@ -8369,29 +8375,6 @@ public:
//
//
- /// \name Pseudo-Object
- /// Implementations are in SemaPseudoObject.cpp
- ///@{
-
-public:
- void maybeExtendBlockObject(ExprResult &E);
-
- ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
- UnaryOperatorKind Opcode, Expr *Op);
- ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
- BinaryOperatorKind Opcode, Expr *LHS,
- Expr *RHS);
- ExprResult checkPseudoObjectRValue(Expr *E);
- Expr *recreateSyntacticForm(PseudoObjectExpr *E);
-
- ///@}
-
- //
- //
- // -------------------------------------------------------------------------
- //
- //
-
/// \name Statements
/// Implementations are in SemaStmt.cpp
///@{
@@ -8991,7 +8974,8 @@ public:
ExprResult
BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *TemplateArgs);
+ const TemplateArgumentListInfo *TemplateArgs,
+ bool IsAddressOfOperand);
TemplateNameKind ActOnTemplateName(Scope *S, CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
@@ -10083,7 +10067,9 @@ public:
bool SubstTemplateArgument(const TemplateArgumentLoc &Input,
const MultiLevelTemplateArgumentList &TemplateArgs,
- TemplateArgumentLoc &Output);
+ TemplateArgumentLoc &Output,
+ SourceLocation Loc = {},
+ const DeclarationName &Entity = {});
bool
SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args,
const MultiLevelTemplateArgumentList &TemplateArgs,
diff --git a/clang/include/clang/Sema/SemaOpenACC.h b/clang/include/clang/Sema/SemaOpenACC.h
index f838fa97d33a..6f69fa08939b 100644
--- a/clang/include/clang/Sema/SemaOpenACC.h
+++ b/clang/include/clang/Sema/SemaOpenACC.h
@@ -66,9 +66,14 @@ public:
struct DeviceTypeDetails {
SmallVector<DeviceTypeArgument> Archs;
};
+ struct ReductionDetails {
+ OpenACCReductionOperator Op;
+ SmallVector<Expr *> VarList;
+ };
std::variant<std::monostate, DefaultDetails, ConditionDetails,
- IntExprDetails, VarListDetails, WaitDetails, DeviceTypeDetails>
+ IntExprDetails, VarListDetails, WaitDetails, DeviceTypeDetails,
+ ReductionDetails>
Details = std::monostate{};
public:
@@ -170,6 +175,10 @@ public:
return const_cast<OpenACCParsedClause *>(this)->getIntExprs();
}
+ OpenACCReductionOperator getReductionOp() const {
+ return std::get<ReductionDetails>(Details).Op;
+ }
+
ArrayRef<Expr *> getVarList() {
assert((ClauseKind == OpenACCClauseKind::Private ||
ClauseKind == OpenACCClauseKind::NoCreate ||
@@ -188,8 +197,13 @@ public:
ClauseKind == OpenACCClauseKind::PresentOrCreate ||
ClauseKind == OpenACCClauseKind::Attach ||
ClauseKind == OpenACCClauseKind::DevicePtr ||
+ ClauseKind == OpenACCClauseKind::Reduction ||
ClauseKind == OpenACCClauseKind::FirstPrivate) &&
"Parsed clause kind does not have a var-list");
+
+ if (ClauseKind == OpenACCClauseKind::Reduction)
+ return std::get<ReductionDetails>(Details).VarList;
+
return std::get<VarListDetails>(Details).VarList;
}
@@ -334,6 +348,13 @@ public:
Details = VarListDetails{std::move(VarList), IsReadOnly, IsZero};
}
+ void setReductionDetails(OpenACCReductionOperator Op,
+ llvm::SmallVector<Expr *> &&VarList) {
+ assert(ClauseKind == OpenACCClauseKind::Reduction &&
+ "reduction details only valid on reduction");
+ Details = ReductionDetails{Op, std::move(VarList)};
+ }
+
void setWaitDetails(Expr *DevNum, SourceLocation QueuesLoc,
llvm::SmallVector<Expr *> &&IntExprs) {
assert(ClauseKind == OpenACCClauseKind::Wait &&
@@ -394,7 +415,11 @@ public:
/// Called when encountering a 'var' for OpenACC, ensures it is actually a
/// declaration reference to a variable of the correct type.
- ExprResult ActOnVar(Expr *VarExpr);
+ ExprResult ActOnVar(OpenACCClauseKind CK, Expr *VarExpr);
+
+ /// Called while semantically analyzing the reduction clause, ensuring the var
+ /// is the correct kind of reference.
+ ExprResult CheckReductionVar(Expr *VarExpr);
/// Called to check the 'var' type is a variable of pointer type, necessary
/// for 'deviceptr' and 'attach' clauses. Returns true on success.
diff --git a/clang/include/clang/Sema/SemaPseudoObject.h b/clang/include/clang/Sema/SemaPseudoObject.h
new file mode 100644
index 000000000000..22d8be2b3726
--- /dev/null
+++ b/clang/include/clang/Sema/SemaPseudoObject.h
@@ -0,0 +1,40 @@
+//===----- SemaPseudoObject.h --- Semantic Analysis for Pseudo-Objects ----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/// \file
+/// This file declares semantic analysis for expressions involving
+/// pseudo-object references.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_CLANG_SEMA_SEMAPSEUDOOBJECT_H
+#define LLVM_CLANG_SEMA_SEMAPSEUDOOBJECT_H
+
+#include "clang/AST/Expr.h"
+#include "clang/AST/OperationKinds.h"
+#include "clang/Basic/SourceLocation.h"
+#include "clang/Sema/Ownership.h"
+#include "clang/Sema/Scope.h"
+#include "clang/Sema/SemaBase.h"
+
+namespace clang {
+
+class SemaPseudoObject : public SemaBase {
+public:
+ SemaPseudoObject(Sema &S);
+
+ ExprResult checkIncDec(Scope *S, SourceLocation OpLoc,
+ UnaryOperatorKind Opcode, Expr *Op);
+ ExprResult checkAssignment(Scope *S, SourceLocation OpLoc,
+ BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS);
+ ExprResult checkRValue(Expr *E);
+ Expr *recreateSyntacticForm(PseudoObjectExpr *E);
+};
+
+} // namespace clang
+
+#endif // LLVM_CLANG_SEMA_SEMAPSEUDOOBJECT_H
\ No newline at end of file
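The new header follows the Sema-component split used throughout this patch: pseudo-object checks move into a class of their own, and Sema reaches it through the asserting PseudoObject() accessor added above. A standalone sketch of that shape, with illustrative names only (not clang's actual types):

#include <cassert>
#include <memory>

struct Sema; // forward declaration of the central object

// Focused component that owns one area of semantic analysis.
struct SemaPseudoObjectLike {
  explicit SemaPseudoObjectLike(Sema &S) : SemaRef(S) {}
  bool checkRValue(int /*placeholder for Expr*/) { return true; }
  Sema &SemaRef;
};

// The central object hands out a reference to the component on demand.
struct Sema {
  std::unique_ptr<SemaPseudoObjectLike> PseudoObjectPtr;
  SemaPseudoObjectLike &PseudoObject() {
    assert(PseudoObjectPtr && "component not initialized");
    return *PseudoObjectPtr;
  }
};

int main() {
  Sema S;
  S.PseudoObjectPtr = std::make_unique<SemaPseudoObjectLike>(S);
  return S.PseudoObject().checkRValue(0) ? 0 : 1;
}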
diff --git a/clang/lib/ARCMigrate/ARCMT.cpp b/clang/lib/ARCMigrate/ARCMT.cpp
index b410d5f3b42a..5835559bff6b 100644
--- a/clang/lib/ARCMigrate/ARCMT.cpp
+++ b/clang/lib/ARCMigrate/ARCMT.cpp
@@ -606,8 +606,7 @@ bool MigrationProcess::applyTransform(TransformFn trans,
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
std::unique_ptr<llvm::MemoryBuffer> memBuf(
- llvm::MemoryBuffer::getMemBufferCopy(
- StringRef(newText.data(), newText.size()), newFname));
+ llvm::MemoryBuffer::getMemBufferCopy(newText.str(), newFname));
SmallString<64> filePath(file->getName());
Unit->getFileManager().FixupRelativePath(filePath);
Remapper.remap(filePath.str(), std::move(memBuf));
diff --git a/clang/lib/ARCMigrate/ObjCMT.cpp b/clang/lib/ARCMigrate/ObjCMT.cpp
index aaf41dc4039c..4357c8e3f09a 100644
--- a/clang/lib/ARCMigrate/ObjCMT.cpp
+++ b/clang/lib/ARCMigrate/ObjCMT.cpp
@@ -1963,8 +1963,7 @@ void ObjCMigrateASTConsumer::HandleTranslationUnit(ASTContext &Ctx) {
llvm::raw_svector_ostream vecOS(newText);
buf.write(vecOS);
std::unique_ptr<llvm::MemoryBuffer> memBuf(
- llvm::MemoryBuffer::getMemBufferCopy(
- StringRef(newText.data(), newText.size()), file->getName()));
+ llvm::MemoryBuffer::getMemBufferCopy(newText.str(), file->getName()));
SmallString<64> filePath(file->getName());
FileMgr.FixupRelativePath(filePath);
Remapper.remap(filePath.str(), std::move(memBuf));
diff --git a/clang/lib/AST/ASTContext.cpp b/clang/lib/AST/ASTContext.cpp
index 8fc2bb8c401c..f9902a978aa3 100644
--- a/clang/lib/AST/ASTContext.cpp
+++ b/clang/lib/AST/ASTContext.cpp
@@ -3054,21 +3054,27 @@ QualType ASTContext::removeAddrSpaceQualType(QualType T) const {
if (!T.hasAddressSpace())
return T;
- // If we are composing extended qualifiers together, merge together
- // into one ExtQuals node.
QualifierCollector Quals;
const Type *TypeNode;
+ // For arrays, strip the qualifier off the element type, then reconstruct the
+ // array type
+ if (T.getTypePtr()->isArrayType()) {
+ T = getUnqualifiedArrayType(T, Quals);
+ TypeNode = T.getTypePtr();
+ } else {
+ // If we are composing extended qualifiers together, merge together
+ // into one ExtQuals node.
+ while (T.hasAddressSpace()) {
+ TypeNode = Quals.strip(T);
+
+ // If the type no longer has an address space after stripping qualifiers,
+ // jump out.
+ if (!QualType(TypeNode, 0).hasAddressSpace())
+ break;
- while (T.hasAddressSpace()) {
- TypeNode = Quals.strip(T);
-
- // If the type no longer has an address space after stripping qualifiers,
- // jump out.
- if (!QualType(TypeNode, 0).hasAddressSpace())
- break;
-
- // There might be sugar in the way. Strip it and try again.
- T = T.getSingleStepDesugaredType(*this);
+ // There might be sugar in the way. Strip it and try again.
+ T = T.getSingleStepDesugaredType(*this);
+ }
}
Quals.removeAddressSpace();
@@ -6093,7 +6099,7 @@ CanQualType ASTContext::getCanonicalParamType(QualType T) const {
}
QualType ASTContext::getUnqualifiedArrayType(QualType type,
- Qualifiers &quals) {
+ Qualifiers &quals) const {
SplitQualType splitType = type.getSplitUnqualifiedType();
// FIXME: getSplitUnqualifiedType() actually walks all the way to
@@ -6488,7 +6494,8 @@ bool ASTContext::isSameDefaultTemplateArgument(const NamedDecl *X,
if (!TTPX->hasDefaultArgument() || !TTPY->hasDefaultArgument())
return false;
- return hasSameType(TTPX->getDefaultArgument(), TTPY->getDefaultArgument());
+ return hasSameType(TTPX->getDefaultArgument().getArgument().getAsType(),
+ TTPY->getDefaultArgument().getArgument().getAsType());
}
if (auto *NTTPX = dyn_cast<NonTypeTemplateParmDecl>(X)) {
diff --git a/clang/lib/AST/ASTImporter.cpp b/clang/lib/AST/ASTImporter.cpp
index 9ff8e1ea78d8..a47dd72a5679 100644
--- a/clang/lib/AST/ASTImporter.cpp
+++ b/clang/lib/AST/ASTImporter.cpp
@@ -5917,11 +5917,11 @@ ASTNodeImporter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
}
if (D->hasDefaultArgument()) {
- Expected<TypeSourceInfo *> ToDefaultArgOrErr =
- import(D->getDefaultArgumentInfo());
+ Expected<TemplateArgumentLoc> ToDefaultArgOrErr =
+ import(D->getDefaultArgument());
if (!ToDefaultArgOrErr)
return ToDefaultArgOrErr.takeError();
- ToD->setDefaultArgument(*ToDefaultArgOrErr);
+ ToD->setDefaultArgument(ToD->getASTContext(), *ToDefaultArgOrErr);
}
return ToD;
diff --git a/clang/lib/AST/DeclPrinter.cpp b/clang/lib/AST/DeclPrinter.cpp
index c5868256b440..bf32aafc3d0a 100644
--- a/clang/lib/AST/DeclPrinter.cpp
+++ b/clang/lib/AST/DeclPrinter.cpp
@@ -1883,7 +1883,8 @@ void DeclPrinter::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *TTP) {
if (TTP->hasDefaultArgument()) {
Out << " = ";
- Out << TTP->getDefaultArgument().getAsString(Policy);
+ TTP->getDefaultArgument().getArgument().print(Policy, Out,
+ /*IncludeType=*/false);
}
}
diff --git a/clang/lib/AST/DeclTemplate.cpp b/clang/lib/AST/DeclTemplate.cpp
index 26765a5da1dc..46bb3e8e5138 100644
--- a/clang/lib/AST/DeclTemplate.cpp
+++ b/clang/lib/AST/DeclTemplate.cpp
@@ -669,23 +669,30 @@ TemplateTypeParmDecl::CreateDeserialized(const ASTContext &C, GlobalDeclID ID,
}
SourceLocation TemplateTypeParmDecl::getDefaultArgumentLoc() const {
- return hasDefaultArgument()
- ? getDefaultArgumentInfo()->getTypeLoc().getBeginLoc()
- : SourceLocation();
+ return hasDefaultArgument() ? getDefaultArgument().getLocation()
+ : SourceLocation();
}
SourceRange TemplateTypeParmDecl::getSourceRange() const {
if (hasDefaultArgument() && !defaultArgumentWasInherited())
return SourceRange(getBeginLoc(),
- getDefaultArgumentInfo()->getTypeLoc().getEndLoc());
+ getDefaultArgument().getSourceRange().getEnd());
// TypeDecl::getSourceRange returns a range containing name location, which is
// wrong for unnamed template parameters. e.g:
// it will return <[[typename>]] instead of <[[typename]]>
- else if (getDeclName().isEmpty())
+ if (getDeclName().isEmpty())
return SourceRange(getBeginLoc());
return TypeDecl::getSourceRange();
}
+void TemplateTypeParmDecl::setDefaultArgument(
+ const ASTContext &C, const TemplateArgumentLoc &DefArg) {
+ if (DefArg.getArgument().isNull())
+ DefaultArgument.set(nullptr);
+ else
+ DefaultArgument.set(new (C) TemplateArgumentLoc(DefArg));
+}
+
unsigned TemplateTypeParmDecl::getDepth() const {
return getTypeForDecl()->castAs<TemplateTypeParmType>()->getDepth();
}
diff --git a/clang/lib/AST/ExprCXX.cpp b/clang/lib/AST/ExprCXX.cpp
index 7e9343271ac3..2abc0acbfde3 100644
--- a/clang/lib/AST/ExprCXX.cpp
+++ b/clang/lib/AST/ExprCXX.cpp
@@ -1665,12 +1665,10 @@ NonTypeTemplateParmDecl *SubstNonTypeTemplateParmExpr::getParameter() const {
getReplacedTemplateParameterList(getAssociatedDecl())->asArray()[Index]);
}
-PackIndexingExpr *PackIndexingExpr::Create(ASTContext &Context,
- SourceLocation EllipsisLoc,
- SourceLocation RSquareLoc,
- Expr *PackIdExpr, Expr *IndexExpr,
- std::optional<int64_t> Index,
- ArrayRef<Expr *> SubstitutedExprs) {
+PackIndexingExpr *PackIndexingExpr::Create(
+ ASTContext &Context, SourceLocation EllipsisLoc, SourceLocation RSquareLoc,
+ Expr *PackIdExpr, Expr *IndexExpr, std::optional<int64_t> Index,
+ ArrayRef<Expr *> SubstitutedExprs, bool ExpandedToEmptyPack) {
QualType Type;
if (Index && !SubstitutedExprs.empty())
Type = SubstitutedExprs[*Index]->getType();
@@ -1679,8 +1677,9 @@ PackIndexingExpr *PackIndexingExpr::Create(ASTContext &Context,
void *Storage =
Context.Allocate(totalSizeToAlloc<Expr *>(SubstitutedExprs.size()));
- return new (Storage) PackIndexingExpr(
- Type, EllipsisLoc, RSquareLoc, PackIdExpr, IndexExpr, SubstitutedExprs);
+ return new (Storage)
+ PackIndexingExpr(Type, EllipsisLoc, RSquareLoc, PackIdExpr, IndexExpr,
+ SubstitutedExprs, ExpandedToEmptyPack);
}
NamedDecl *PackIndexingExpr::getPackDecl() const {
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.cpp b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
index 70328c1f52af..859a3fabea32 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.cpp
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.cpp
@@ -1595,6 +1595,30 @@ bool ByteCodeExprGen<Emitter>::VisitStringLiteral(const StringLiteral *E) {
}
template <class Emitter>
+bool ByteCodeExprGen<Emitter>::VisitSYCLUniqueStableNameExpr(
+ const SYCLUniqueStableNameExpr *E) {
+ if (DiscardResult)
+ return true;
+
+ assert(!Initializing);
+
+ auto &A = Ctx.getASTContext();
+ std::string ResultStr = E->ComputeName(A);
+
+ QualType CharTy = A.CharTy.withConst();
+ APInt Size(A.getTypeSize(A.getSizeType()), ResultStr.size() + 1);
+ QualType ArrayTy = A.getConstantArrayType(CharTy, Size, nullptr,
+ ArraySizeModifier::Normal, 0);
+
+ StringLiteral *SL =
+ StringLiteral::Create(A, ResultStr, StringLiteralKind::Ordinary,
+ /*Pascal=*/false, ArrayTy, E->getLocation());
+
+ unsigned StringIndex = P.createGlobalString(SL);
+ return this->emitGetPtrGlobal(StringIndex, E);
+}
+
+template <class Emitter>
bool ByteCodeExprGen<Emitter>::VisitCharacterLiteral(
const CharacterLiteral *E) {
if (DiscardResult)
@@ -2088,6 +2112,21 @@ bool ByteCodeExprGen<Emitter>::VisitCXXConstructExpr(
if (T->isRecordType()) {
const CXXConstructorDecl *Ctor = E->getConstructor();
+ // If we're discarding a construct expression, we still need
+ // to allocate a variable and call the constructor and destructor.
+ if (DiscardResult) {
+ if (Ctor->isTrivial())
+ return true;
+ assert(!Initializing);
+ std::optional<unsigned> LocalIndex = allocateLocal(E);
+
+ if (!LocalIndex)
+ return false;
+
+ if (!this->emitGetPtrLocal(*LocalIndex, E))
+ return false;
+ }
+
// Zero initialization.
if (E->requiresZeroInitialization()) {
const Record *R = getRecord(E->getType());
@@ -2108,19 +2147,6 @@ bool ByteCodeExprGen<Emitter>::VisitCXXConstructExpr(
assert(Func->hasThisPointer());
assert(!Func->hasRVO());
- // If we're discarding a construct expression, we still need
- // to allocate a variable and call the constructor and destructor.
- if (DiscardResult) {
- assert(!Initializing);
- std::optional<unsigned> LocalIndex = allocateLocal(E);
-
- if (!LocalIndex)
- return false;
-
- if (!this->emitGetPtrLocal(*LocalIndex, E))
- return false;
- }
-
// The This pointer is already on the stack because this is an initializer,
// but we need to dup() so the call() below has its own copy.
if (!this->emitDupPtr(E))
@@ -2538,8 +2564,6 @@ bool ByteCodeExprGen<Emitter>::VisitShuffleVectorExpr(
assert(E->getNumSubExprs() > 2);
const Expr *Vecs[] = {E->getExpr(0), E->getExpr(1)};
- assert(Vecs[0]->getType() == Vecs[1]->getType());
-
const VectorType *VT = Vecs[0]->getType()->castAs<VectorType>();
PrimType ElemT = classifyPrim(VT->getElementType());
unsigned NumInputElems = VT->getNumElements();
@@ -3371,6 +3395,9 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
switch (E->getOpcode()) {
case UO_PostInc: { // x++
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+
if (!this->visit(SubExpr))
return false;
@@ -3389,6 +3416,9 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return DiscardResult ? this->emitIncPop(*T, E) : this->emitInc(*T, E);
}
case UO_PostDec: { // x--
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+
if (!this->visit(SubExpr))
return false;
@@ -3407,6 +3437,9 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return DiscardResult ? this->emitDecPop(*T, E) : this->emitDec(*T, E);
}
case UO_PreInc: { // ++x
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+
if (!this->visit(SubExpr))
return false;
@@ -3451,6 +3484,9 @@ bool ByteCodeExprGen<Emitter>::VisitUnaryOperator(const UnaryOperator *E) {
return E->isGLValue() || this->emitLoadPop(*T, E);
}
case UO_PreDec: { // --x
+ if (!Ctx.getLangOpts().CPlusPlus14)
+ return this->emitInvalid(E);
+
if (!this->visit(SubExpr))
return false;
@@ -3724,8 +3760,16 @@ bool ByteCodeExprGen<Emitter>::VisitDeclRefExpr(const DeclRefExpr *E) {
}
}
- if (std::optional<unsigned> I = P.getOrCreateDummy(D))
- return this->emitGetPtrGlobal(*I, E);
+ if (std::optional<unsigned> I = P.getOrCreateDummy(D)) {
+ if (!this->emitGetPtrGlobal(*I, E))
+ return false;
+ // Convert the dummy pointer to another pointer type if we have to.
+ if (PrimType PT = classifyPrim(E); PT != PT_Ptr) {
+ if (!this->emitDecayPtr(PT_Ptr, PT, E))
+ return false;
+ }
+ return true;
+ }
return this->emitInvalidDeclRef(E, E);
}
diff --git a/clang/lib/AST/Interp/ByteCodeExprGen.h b/clang/lib/AST/Interp/ByteCodeExprGen.h
index e73a2f0334cf..a2e283c86633 100644
--- a/clang/lib/AST/Interp/ByteCodeExprGen.h
+++ b/clang/lib/AST/Interp/ByteCodeExprGen.h
@@ -90,6 +90,7 @@ public:
bool VisitOpaqueValueExpr(const OpaqueValueExpr *E);
bool VisitAbstractConditionalOperator(const AbstractConditionalOperator *E);
bool VisitStringLiteral(const StringLiteral *E);
+ bool VisitSYCLUniqueStableNameExpr(const SYCLUniqueStableNameExpr *E);
bool VisitCharacterLiteral(const CharacterLiteral *E);
bool VisitCompoundAssignOperator(const CompoundAssignOperator *E);
bool VisitFloatCompoundAssignOperator(const CompoundAssignOperator *E);
diff --git a/clang/lib/AST/Interp/Interp.h b/clang/lib/AST/Interp/Interp.h
index d9f23a4b8c96..bc2ca126ce36 100644
--- a/clang/lib/AST/Interp/Interp.h
+++ b/clang/lib/AST/Interp/Interp.h
@@ -1245,6 +1245,8 @@ inline bool GetPtrField(InterpState &S, CodePtr OpPC, uint32_t Off) {
return false;
if (!CheckRange(S, OpPC, Ptr, CSK_Field))
return false;
+ if (!CheckArray(S, OpPC, Ptr))
+ return false;
if (!CheckSubobject(S, OpPC, Ptr, CSK_Field))
return false;
diff --git a/clang/lib/AST/Interp/Pointer.h b/clang/lib/AST/Interp/Pointer.h
index 9900f37e60d4..3ade5756e580 100644
--- a/clang/lib/AST/Interp/Pointer.h
+++ b/clang/lib/AST/Interp/Pointer.h
@@ -556,12 +556,16 @@ public:
if (!asBlockPointer().Pointee)
return false;
- return isElementPastEnd() || getSize() == getOffset();
+ return isElementPastEnd() ||
+ (getSize() == getOffset() && !isZeroSizeArray());
}
/// Checks if the pointer is an out-of-bounds element pointer.
bool isElementPastEnd() const { return Offset == PastEndMark; }
+ /// Checks if the pointer is pointing to a zero-size array.
+ bool isZeroSizeArray() const { return getFieldDesc()->isZeroSizeArray(); }
+
/// Dereferences the pointer, if it's live.
template <typename T> T &deref() const {
assert(isLive() && "Invalid pointer");
diff --git a/clang/lib/AST/JSONNodeDumper.cpp b/clang/lib/AST/JSONNodeDumper.cpp
index 42608476b1c1..efd3a639d746 100644
--- a/clang/lib/AST/JSONNodeDumper.cpp
+++ b/clang/lib/AST/JSONNodeDumper.cpp
@@ -1028,7 +1028,7 @@ void JSONNodeDumper::VisitTemplateTypeParmDecl(const TemplateTypeParmDecl *D) {
if (D->hasDefaultArgument())
JOS.attributeObject("defaultArg", [=] {
- Visit(D->getDefaultArgument(), SourceRange(),
+ Visit(D->getDefaultArgument().getArgument(), SourceRange(),
D->getDefaultArgStorage().getInheritedFrom(),
D->defaultArgumentWasInherited() ? "inherited from" : "previous");
});
diff --git a/clang/lib/AST/ODRDiagsEmitter.cpp b/clang/lib/AST/ODRDiagsEmitter.cpp
index 5b1cdc16e2ea..97b6c14d9ede 100644
--- a/clang/lib/AST/ODRDiagsEmitter.cpp
+++ b/clang/lib/AST/ODRDiagsEmitter.cpp
@@ -1409,13 +1409,15 @@ bool ODRDiagsEmitter::diagnoseMismatch(
}
if (HasFirstDefaultArgument && HasSecondDefaultArgument) {
- QualType FirstType = FirstTTPD->getDefaultArgument();
- QualType SecondType = SecondTTPD->getDefaultArgument();
- if (computeODRHash(FirstType) != computeODRHash(SecondType)) {
+ TemplateArgument FirstTA =
+ FirstTTPD->getDefaultArgument().getArgument();
+ TemplateArgument SecondTA =
+ SecondTTPD->getDefaultArgument().getArgument();
+ if (computeODRHash(FirstTA) != computeODRHash(SecondTA)) {
DiagTemplateError(FunctionTemplateParameterDifferentDefaultArgument)
- << (i + 1) << FirstType;
+ << (i + 1) << FirstTA;
DiagTemplateNote(FunctionTemplateParameterDifferentDefaultArgument)
- << (i + 1) << SecondType;
+ << (i + 1) << SecondTA;
return true;
}
}
diff --git a/clang/lib/AST/ODRHash.cpp b/clang/lib/AST/ODRHash.cpp
index 6f04739cf669..18b1e17e0fee 100644
--- a/clang/lib/AST/ODRHash.cpp
+++ b/clang/lib/AST/ODRHash.cpp
@@ -462,7 +462,7 @@ public:
D->hasDefaultArgument() && !D->defaultArgumentWasInherited();
Hash.AddBoolean(hasDefaultArgument);
if (hasDefaultArgument) {
- AddTemplateArgument(D->getDefaultArgument());
+ AddTemplateArgument(D->getDefaultArgument().getArgument());
}
Hash.AddBoolean(D->isParameterPack());
diff --git a/clang/lib/AST/OpenACCClause.cpp b/clang/lib/AST/OpenACCClause.cpp
index 8ff6dabcbc48..cb2c7f98be75 100644
--- a/clang/lib/AST/OpenACCClause.cpp
+++ b/clang/lib/AST/OpenACCClause.cpp
@@ -35,7 +35,7 @@ bool OpenACCClauseWithVarList::classof(const OpenACCClause *C) {
OpenACCAttachClause::classof(C) || OpenACCNoCreateClause::classof(C) ||
OpenACCPresentClause::classof(C) || OpenACCCopyClause::classof(C) ||
OpenACCCopyInClause::classof(C) || OpenACCCopyOutClause::classof(C) ||
- OpenACCCreateClause::classof(C);
+ OpenACCReductionClause::classof(C) || OpenACCCreateClause::classof(C);
}
bool OpenACCClauseWithCondition::classof(const OpenACCClause *C) {
return OpenACCIfClause::classof(C) || OpenACCSelfClause::classof(C);
@@ -310,6 +310,16 @@ OpenACCDeviceTypeClause *OpenACCDeviceTypeClause::Create(
OpenACCDeviceTypeClause(K, BeginLoc, LParenLoc, Archs, EndLoc);
}
+OpenACCReductionClause *OpenACCReductionClause::Create(
+ const ASTContext &C, SourceLocation BeginLoc, SourceLocation LParenLoc,
+ OpenACCReductionOperator Operator, ArrayRef<Expr *> VarList,
+ SourceLocation EndLoc) {
+ void *Mem = C.Allocate(
+ OpenACCReductionClause::totalSizeToAlloc<Expr *>(VarList.size()));
+ return new (Mem)
+ OpenACCReductionClause(BeginLoc, LParenLoc, Operator, VarList, EndLoc);
+}
+
//===----------------------------------------------------------------------===//
// OpenACC clauses printing methods
//===----------------------------------------------------------------------===//
@@ -445,6 +455,14 @@ void OpenACCClausePrinter::VisitCreateClause(const OpenACCCreateClause &C) {
OS << ")";
}
+void OpenACCClausePrinter::VisitReductionClause(
+ const OpenACCReductionClause &C) {
+ OS << "reduction(" << C.getReductionOp() << ": ";
+ llvm::interleaveComma(C.getVarList(), OS,
+ [&](const Expr *E) { printExpr(E); });
+ OS << ")";
+}
+
void OpenACCClausePrinter::VisitWaitClause(const OpenACCWaitClause &C) {
OS << "wait";
if (!C.getLParenLoc().isInvalid()) {
diff --git a/clang/lib/AST/StmtProfile.cpp b/clang/lib/AST/StmtProfile.cpp
index caab4ab0ef16..00b8c43af035 100644
--- a/clang/lib/AST/StmtProfile.cpp
+++ b/clang/lib/AST/StmtProfile.cpp
@@ -2588,6 +2588,12 @@ void OpenACCClauseProfiler::VisitWaitClause(const OpenACCWaitClause &Clause) {
/// Nothing to do here, there are no sub-statements.
void OpenACCClauseProfiler::VisitDeviceTypeClause(
const OpenACCDeviceTypeClause &Clause) {}
+
+void OpenACCClauseProfiler::VisitReductionClause(
+ const OpenACCReductionClause &Clause) {
+ for (auto *E : Clause.getVarList())
+ Profiler.VisitStmt(E);
+}
} // namespace
void StmtProfiler::VisitOpenACCComputeConstruct(
diff --git a/clang/lib/AST/TextNodeDumper.cpp b/clang/lib/AST/TextNodeDumper.cpp
index efcd74717a4e..4a1e94ffe283 100644
--- a/clang/lib/AST/TextNodeDumper.cpp
+++ b/clang/lib/AST/TextNodeDumper.cpp
@@ -457,6 +457,10 @@ void TextNodeDumper::Visit(const OpenACCClause *C) {
});
OS << ")";
break;
+ case OpenACCClauseKind::Reduction:
+ OS << " clause Operator: "
+ << cast<OpenACCReductionClause>(C)->getReductionOp();
+ break;
default:
// Nothing to do here.
break;
diff --git a/clang/lib/AST/Type.cpp b/clang/lib/AST/Type.cpp
index e31741cd4424..3b90b8229dd1 100644
--- a/clang/lib/AST/Type.cpp
+++ b/clang/lib/AST/Type.cpp
@@ -2372,6 +2372,14 @@ bool Type::isIncompleteType(NamedDecl **Def) const {
*Def = Rec;
return !Rec->isCompleteDefinition();
}
+ case InjectedClassName: {
+ CXXRecordDecl *Rec = cast<InjectedClassNameType>(CanonicalType)->getDecl();
+ if (!Rec->isBeingDefined())
+ return false;
+ if (Def)
+ *Def = Rec;
+ return true;
+ }
case ConstantArray:
case VariableArray:
// An array is incomplete if its element type is incomplete
diff --git a/clang/lib/AST/TypePrinter.cpp b/clang/lib/AST/TypePrinter.cpp
index 87f0a8728d85..981f09410401 100644
--- a/clang/lib/AST/TypePrinter.cpp
+++ b/clang/lib/AST/TypePrinter.cpp
@@ -2273,8 +2273,8 @@ bool clang::isSubstitutedDefaultArgument(ASTContext &Ctx, TemplateArgument Arg,
if (auto *TTPD = dyn_cast<TemplateTypeParmDecl>(Param)) {
return TTPD->hasDefaultArgument() &&
- isSubstitutedTemplateArgument(Ctx, Arg, TTPD->getDefaultArgument(),
- Args, Depth);
+ isSubstitutedTemplateArgument(
+ Ctx, Arg, TTPD->getDefaultArgument().getArgument(), Args, Depth);
} else if (auto *TTPD = dyn_cast<TemplateTemplateParmDecl>(Param)) {
return TTPD->hasDefaultArgument() &&
isSubstitutedTemplateArgument(
diff --git a/clang/lib/Analysis/FlowSensitive/CMakeLists.txt b/clang/lib/Analysis/FlowSensitive/CMakeLists.txt
index 6631fe27f3d9..f89d4e57e581 100644
--- a/clang/lib/Analysis/FlowSensitive/CMakeLists.txt
+++ b/clang/lib/Analysis/FlowSensitive/CMakeLists.txt
@@ -2,6 +2,7 @@ add_clang_library(clangAnalysisFlowSensitive
AdornedCFG.cpp
Arena.cpp
ASTOps.cpp
+ CNFFormula.cpp
DataflowAnalysisContext.cpp
DataflowEnvironment.cpp
Formula.cpp
diff --git a/clang/lib/Analysis/FlowSensitive/CNFFormula.cpp b/clang/lib/Analysis/FlowSensitive/CNFFormula.cpp
new file mode 100644
index 000000000000..2410ce1e7bd6
--- /dev/null
+++ b/clang/lib/Analysis/FlowSensitive/CNFFormula.cpp
@@ -0,0 +1,303 @@
+//===- CNFFormula.cpp -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A representation of a boolean formula in 3-CNF.
+//
+//===----------------------------------------------------------------------===//
+
+#include "clang/Analysis/FlowSensitive/CNFFormula.h"
+#include "llvm/ADT/DenseSet.h"
+
+#include <queue>
+
+namespace clang {
+namespace dataflow {
+
+namespace {
+
+/// Applies simplifications while building up a BooleanFormula.
+/// We keep track of unit clauses, which tell us variables that must be
+/// true/false in any model that satisfies the overall formula.
+/// Such variables can be dropped from subsequently-added clauses, which
+/// may in turn yield more unit clauses or even a contradiction.
+/// The total added complexity of this preprocessing is O(N): for every
+/// clause, we do a lookup for each unit clause. The lookup is O(1) on
+/// average. This method won't catch all contradictory formulas; more passes
+/// can in principle catch more cases, but we leave those and the general
+/// case to the proper SAT solver.
+struct CNFFormulaBuilder {
+ // Formula should outlive CNFFormulaBuilder.
+ explicit CNFFormulaBuilder(CNFFormula &CNF) : Formula(CNF) {}
+
+ /// Adds the `L1 v ... v Ln` clause to the formula. Applies
+ /// simplifications, based on single-literal clauses.
+ ///
+ /// Requirements:
+ ///
+ /// `Li` must not be `NullLit`.
+ ///
+ /// All literals must be distinct.
+ void addClause(ArrayRef<Literal> Literals) {
+ // We generate clauses with up to 3 literals in this file.
+ assert(!Literals.empty() && Literals.size() <= 3);
+ // Contains literals of the simplified clause.
+ llvm::SmallVector<Literal> Simplified;
+ for (auto L : Literals) {
+ assert(L != NullLit &&
+ llvm::all_of(Simplified, [L](Literal S) { return S != L; }));
+ auto X = var(L);
+ if (trueVars.contains(X)) { // X must be true
+ if (isPosLit(L))
+ return; // Omit clause `(... v X v ...)`, it is `true`.
+ else
+ continue; // Omit `!X` from `(... v !X v ...)`.
+ }
+ if (falseVars.contains(X)) { // X must be false
+ if (isNegLit(L))
+ return; // Omit clause `(... v !X v ...)`, it is `true`.
+ else
+ continue; // Omit `X` from `(... v X v ...)`.
+ }
+ Simplified.push_back(L);
+ }
+ if (Simplified.empty()) {
+ // Simplification made the clause empty, which is equivalent to `false`.
+ // We already know that this formula is unsatisfiable.
+ Formula.addClause(Simplified);
+ return;
+ }
+ if (Simplified.size() == 1) {
+      // We have a new unit clause.
+ const Literal lit = Simplified.front();
+ const Variable v = var(lit);
+ if (isPosLit(lit))
+ trueVars.insert(v);
+ else
+ falseVars.insert(v);
+ }
+ Formula.addClause(Simplified);
+ }
+
+ /// Returns true if we observed a contradiction while adding clauses.
+  /// In this case, the formula is already known to be unsatisfiable.
+ bool isKnownContradictory() { return Formula.knownContradictory(); }
+
+private:
+ CNFFormula &Formula;
+ llvm::DenseSet<Variable> trueVars;
+ llvm::DenseSet<Variable> falseVars;
+};
+
+} // namespace
+
+CNFFormula::CNFFormula(Variable LargestVar)
+ : LargestVar(LargestVar), KnownContradictory(false) {
+ Clauses.push_back(0);
+ ClauseStarts.push_back(0);
+}
+
+void CNFFormula::addClause(ArrayRef<Literal> lits) {
+ assert(llvm::all_of(lits, [](Literal L) { return L != NullLit; }));
+
+ if (lits.empty())
+ KnownContradictory = true;
+
+ const size_t S = Clauses.size();
+ ClauseStarts.push_back(S);
+ Clauses.insert(Clauses.end(), lits.begin(), lits.end());
+}
+
+CNFFormula buildCNF(const llvm::ArrayRef<const Formula *> &Formulas,
+ llvm::DenseMap<Variable, Atom> &Atomics) {
+  // The general strategy of the algorithm implemented below is to map each
+  // of the sub-formulas in `Formulas` to a unique variable and use these
+  // variables in the resulting CNF expression to avoid exponential blow-up.
+  // The number of literals in the resulting formula is guaranteed to be
+  // linear in the number of sub-formulas in `Formulas`.
+
+  // Map each sub-formula in `Formulas` to a unique variable.
+ llvm::DenseMap<const Formula *, Variable> FormulaToVar;
+ // Store variable identifiers and Atom of atomic booleans.
+ Variable NextVar = 1;
+ {
+ std::queue<const Formula *> UnprocessedFormulas;
+ for (const Formula *F : Formulas)
+ UnprocessedFormulas.push(F);
+ while (!UnprocessedFormulas.empty()) {
+ Variable Var = NextVar;
+ const Formula *F = UnprocessedFormulas.front();
+ UnprocessedFormulas.pop();
+
+ if (!FormulaToVar.try_emplace(F, Var).second)
+ continue;
+ ++NextVar;
+
+ for (const Formula *Op : F->operands())
+ UnprocessedFormulas.push(Op);
+ if (F->kind() == Formula::AtomRef)
+ Atomics[Var] = F->getAtom();
+ }
+ }
+
+ auto GetVar = [&FormulaToVar](const Formula *F) {
+ auto ValIt = FormulaToVar.find(F);
+ assert(ValIt != FormulaToVar.end());
+ return ValIt->second;
+ };
+
+ CNFFormula CNF(NextVar - 1);
+ std::vector<bool> ProcessedSubVals(NextVar, false);
+ CNFFormulaBuilder builder(CNF);
+
+  // Add a conjunct for each variable that represents a top-level formula
+  // in `Formulas` (the formulas are implicitly conjoined).
+ for (const Formula *F : Formulas)
+ builder.addClause(posLit(GetVar(F)));
+
+ // Add conjuncts that represent the mapping between newly-created variables
+ // and their corresponding sub-formulas.
+ std::queue<const Formula *> UnprocessedFormulas;
+ for (const Formula *F : Formulas)
+ UnprocessedFormulas.push(F);
+ while (!UnprocessedFormulas.empty()) {
+ const Formula *F = UnprocessedFormulas.front();
+ UnprocessedFormulas.pop();
+ const Variable Var = GetVar(F);
+
+ if (ProcessedSubVals[Var])
+ continue;
+ ProcessedSubVals[Var] = true;
+
+ switch (F->kind()) {
+ case Formula::AtomRef:
+ break;
+ case Formula::Literal:
+ CNF.addClause(F->literal() ? posLit(Var) : negLit(Var));
+ break;
+ case Formula::And: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A ^ A)` is equivalent to `(!X v A) ^ (X v !A)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ } else {
+ // `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v
+ // !B)` which is already in conjunctive normal form. Below we add each
+ // of the conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({negLit(Var), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
+ }
+ break;
+ }
+ case Formula::Or: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A v A)` is equivalent to `(!X v A) ^ (X v !A)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ } else {
+ // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v
+ // !B)` which is already in conjunctive normal form. Below we add each
+ // of the conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), posLit(LHS), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS)});
+ builder.addClause({posLit(Var), negLit(RHS)});
+ }
+ break;
+ }
+ case Formula::Not: {
+ const Variable Operand = GetVar(F->operands()[0]);
+
+ // `X <=> !Y` is equivalent to `(!X v !Y) ^ (X v Y)` which is
+ // already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({negLit(Var), negLit(Operand)});
+ builder.addClause({posLit(Var), posLit(Operand)});
+ break;
+ }
+ case Formula::Implies: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ // `X <=> (A => B)` is equivalent to
+ // `(X v A) ^ (X v !B) ^ (!X v !A v B)` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of
+ // the latter expression to the result.
+ builder.addClause({posLit(Var), posLit(LHS)});
+ builder.addClause({posLit(Var), negLit(RHS)});
+ builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
+ break;
+ }
+ case Formula::Equal: {
+ const Variable LHS = GetVar(F->operands()[0]);
+ const Variable RHS = GetVar(F->operands()[1]);
+
+ if (LHS == RHS) {
+ // `X <=> (A <=> A)` is equivalent to `X` which is already in
+ // conjunctive normal form. Below we add each of the conjuncts of the
+ // latter expression to the result.
+ builder.addClause(posLit(Var));
+
+        // No need to visit the sub-formulas of `F`.
+ continue;
+ }
+ // `X <=> (A <=> B)` is equivalent to
+ // `(X v A v B) ^ (X v !A v !B) ^ (!X v A v !B) ^ (!X v !A v B)` which
+ // is already in conjunctive normal form. Below we add each of the
+ // conjuncts of the latter expression to the result.
+ builder.addClause({posLit(Var), posLit(LHS), posLit(RHS)});
+ builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
+ builder.addClause({negLit(Var), posLit(LHS), negLit(RHS)});
+ builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
+ break;
+ }
+ }
+ if (builder.isKnownContradictory()) {
+ return CNF;
+ }
+ for (const Formula *Child : F->operands())
+ UnprocessedFormulas.push(Child);
+ }
+
+ // Unit clauses that were added later were not
+ // considered for the simplification of earlier clauses. Do a final
+ // pass to find more opportunities for simplification.
+ CNFFormula FinalCNF(NextVar - 1);
+ CNFFormulaBuilder FinalBuilder(FinalCNF);
+
+ // Collect unit clauses.
+ for (ClauseID C = 1; C <= CNF.numClauses(); ++C) {
+ if (CNF.clauseSize(C) == 1) {
+ FinalBuilder.addClause(CNF.clauseLiterals(C)[0]);
+ }
+ }
+
+ // Add all clauses that were added previously, preserving the order.
+ for (ClauseID C = 1; C <= CNF.numClauses(); ++C) {
+ FinalBuilder.addClause(CNF.clauseLiterals(C));
+ if (FinalBuilder.isKnownContradictory()) {
+ break;
+ }
+ }
+ // It is possible there were new unit clauses again, but
+ // we stop here and leave the rest to the solver algorithm.
+ return FinalCNF;
+}
+
+} // namespace dataflow
+} // namespace clang
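CNFFormulaBuilder, defined above, simplifies against unit clauses as clauses are added: once a single-literal clause fixes a variable, later clauses containing that literal are dropped as trivially true, clauses containing its negation lose that literal, and an empty simplified clause marks the formula as known-contradictory. A small standalone sketch of that behavior (illustrative encoding and names, not the clang API):

#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

using Literal = std::int32_t; // +v for v, -v for !v (illustrative encoding)

struct Builder {
  std::set<Literal> Fixed;                  // literals known to be true
  std::vector<std::vector<Literal>> Clauses;
  bool Contradiction = false;

  void addClause(std::vector<Literal> Lits) {
    std::vector<Literal> Simplified;
    for (Literal L : Lits) {
      if (Fixed.count(L))
        return;                             // clause already satisfied: drop it
      if (Fixed.count(-L))
        continue;                           // literal known false: omit it
      Simplified.push_back(L);
    }
    if (Simplified.empty()) {
      Contradiction = true;                 // empty clause: formula unsatisfiable
      return;
    }
    if (Simplified.size() == 1)
      Fixed.insert(Simplified.front());     // record the new unit clause
    Clauses.push_back(std::move(Simplified));
  }
};

int main() {
  Builder B;
  B.addClause({1});     // unit clause: x1 must be true
  B.addClause({-1, 2}); // simplifies to the unit clause {2}
  B.addClause({1, 3});  // already satisfied by x1, dropped entirely
  B.addClause({-2});    // contradicts the derived unit clause {2}
  std::cout << "clauses kept: " << B.Clauses.size()
            << ", contradiction: " << B.Contradiction << "\n";
}

Here {1} fixes x1 true, so {-1, 2} collapses to the unit clause {2}, {1, 3} is dropped as already satisfied, and {-2} then yields the contradiction. buildCNF additionally runs a second pass (as in the code above) so that unit clauses discovered late can simplify clauses that were added earlier.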
diff --git a/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp b/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
index 3ef363753532..a39f0e0b29ad 100644
--- a/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
+++ b/clang/lib/Analysis/FlowSensitive/WatchedLiteralsSolver.cpp
@@ -12,105 +12,31 @@
//===----------------------------------------------------------------------===//
#include <cassert>
-#include <cstddef>
-#include <cstdint>
-#include <queue>
#include <vector>
+#include "clang/Analysis/FlowSensitive/CNFFormula.h"
#include "clang/Analysis/FlowSensitive/Formula.h"
#include "clang/Analysis/FlowSensitive/Solver.h"
#include "clang/Analysis/FlowSensitive/WatchedLiteralsSolver.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
-#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/STLExtras.h"
namespace clang {
namespace dataflow {
-// `WatchedLiteralsSolver` is an implementation of Algorithm D from Knuth's
-// The Art of Computer Programming Volume 4: Satisfiability, Fascicle 6. It is
-// based on the backtracking DPLL algorithm [1], keeps references to a single
-// "watched" literal per clause, and uses a set of "active" variables to perform
-// unit propagation.
-//
-// The solver expects that its input is a boolean formula in conjunctive normal
-// form that consists of clauses of at least one literal. A literal is either a
-// boolean variable or its negation. Below we define types, data structures, and
-// utilities that are used to represent boolean formulas in conjunctive normal
-// form.
-//
-// [1] https://en.wikipedia.org/wiki/DPLL_algorithm
-
-/// Boolean variables are represented as positive integers.
-using Variable = uint32_t;
-
-/// A null boolean variable is used as a placeholder in various data structures
-/// and algorithms.
-static constexpr Variable NullVar = 0;
-
-/// Literals are represented as positive integers. Specifically, for a boolean
-/// variable `V` that is represented as the positive integer `I`, the positive
-/// literal `V` is represented as the integer `2*I` and the negative literal
-/// `!V` is represented as the integer `2*I+1`.
-using Literal = uint32_t;
-
-/// A null literal is used as a placeholder in various data structures and
-/// algorithms.
-[[maybe_unused]] static constexpr Literal NullLit = 0;
-
-/// Returns the positive literal `V`.
-static constexpr Literal posLit(Variable V) { return 2 * V; }
-
-static constexpr bool isPosLit(Literal L) { return 0 == (L & 1); }
-
-static constexpr bool isNegLit(Literal L) { return 1 == (L & 1); }
-
-/// Returns the negative literal `!V`.
-static constexpr Literal negLit(Variable V) { return 2 * V + 1; }
-
-/// Returns the negated literal `!L`.
-static constexpr Literal notLit(Literal L) { return L ^ 1; }
-
-/// Returns the variable of `L`.
-static constexpr Variable var(Literal L) { return L >> 1; }
-
-/// Clause identifiers are represented as positive integers.
-using ClauseID = uint32_t;
-
-/// A null clause identifier is used as a placeholder in various data structures
-/// and algorithms.
-static constexpr ClauseID NullClause = 0;
+namespace {
-/// A boolean formula in conjunctive normal form.
-struct CNFFormula {
- /// `LargestVar` is equal to the largest positive integer that represents a
- /// variable in the formula.
- const Variable LargestVar;
-
- /// Literals of all clauses in the formula.
- ///
- /// The element at index 0 stands for the literal in the null clause. It is
- /// set to 0 and isn't used. Literals of clauses in the formula start from the
- /// element at index 1.
- ///
- /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
- /// `Clauses` will be `[0, L1, L2, L2, L3, L4]`.
- std::vector<Literal> Clauses;
+class WatchedLiteralsSolverImpl {
+ /// Stores the variable identifier and Atom for atomic booleans in the
+ /// formula.
+ llvm::DenseMap<Variable, Atom> Atomics;
- /// Start indices of clauses of the formula in `Clauses`.
- ///
- /// The element at index 0 stands for the start index of the null clause. It
- /// is set to 0 and isn't used. Start indices of clauses in the formula start
- /// from the element at index 1.
- ///
- /// For example, for the formula `(L1 v L2) ^ (L2 v L3 v L4)` the elements of
- /// `ClauseStarts` will be `[0, 1, 3]`. Note that the literals of the first
- /// clause always start at index 1. The start index for the literals of the
- /// second clause depends on the size of the first clause and so on.
- std::vector<size_t> ClauseStarts;
+ /// A boolean formula in conjunctive normal form that the solver will attempt
+ /// to prove satisfiable. The formula will be modified in the process.
+ CNFFormula CNF;
/// Maps literals (indices of the vector) to clause identifiers (elements of
/// the vector) that watch the respective literals.
@@ -127,328 +53,6 @@ struct CNFFormula {
/// clauses in the formula start from the element at index 1.
std::vector<ClauseID> NextWatched;
- /// Stores the variable identifier and Atom for atomic booleans in the
- /// formula.
- llvm::DenseMap<Variable, Atom> Atomics;
-
- /// Indicates that we already know the formula is unsatisfiable.
- /// During construction, we catch simple cases of conflicting unit-clauses.
- bool KnownContradictory;
-
- explicit CNFFormula(Variable LargestVar,
- llvm::DenseMap<Variable, Atom> Atomics)
- : LargestVar(LargestVar), Atomics(std::move(Atomics)),
- KnownContradictory(false) {
- Clauses.push_back(0);
- ClauseStarts.push_back(0);
- NextWatched.push_back(0);
- const size_t NumLiterals = 2 * LargestVar + 1;
- WatchedHead.resize(NumLiterals + 1, 0);
- }
-
- /// Adds the `L1 v ... v Ln` clause to the formula.
- /// Requirements:
- ///
- /// `Li` must not be `NullLit`.
- ///
- /// All literals in the input that are not `NullLit` must be distinct.
- void addClause(ArrayRef<Literal> lits) {
- assert(!lits.empty());
- assert(llvm::all_of(lits, [](Literal L) { return L != NullLit; }));
-
- const ClauseID C = ClauseStarts.size();
- const size_t S = Clauses.size();
- ClauseStarts.push_back(S);
- Clauses.insert(Clauses.end(), lits.begin(), lits.end());
-
- // Designate the first literal as the "watched" literal of the clause.
- NextWatched.push_back(WatchedHead[lits.front()]);
- WatchedHead[lits.front()] = C;
- }
-
- /// Returns the number of literals in clause `C`.
- size_t clauseSize(ClauseID C) const {
- return C == ClauseStarts.size() - 1 ? Clauses.size() - ClauseStarts[C]
- : ClauseStarts[C + 1] - ClauseStarts[C];
- }
-
- /// Returns the literals of clause `C`.
- llvm::ArrayRef<Literal> clauseLiterals(ClauseID C) const {
- return llvm::ArrayRef<Literal>(&Clauses[ClauseStarts[C]], clauseSize(C));
- }
-};
-
-/// Applies simplifications while building up a BooleanFormula.
-/// We keep track of unit clauses, which tell us variables that must be
-/// true/false in any model that satisfies the overall formula.
-/// Such variables can be dropped from subsequently-added clauses, which
-/// may in turn yield more unit clauses or even a contradiction.
-/// The total added complexity of this preprocessing is O(N) where we
-/// for every clause, we do a lookup for each unit clauses.
-/// The lookup is O(1) on average. This method won't catch all
-/// contradictory formulas, more passes can in principle catch
-/// more cases but we leave all these and the general case to the
-/// proper SAT solver.
-struct CNFFormulaBuilder {
- // Formula should outlive CNFFormulaBuilder.
- explicit CNFFormulaBuilder(CNFFormula &CNF)
- : Formula(CNF) {}
-
- /// Adds the `L1 v ... v Ln` clause to the formula. Applies
- /// simplifications, based on single-literal clauses.
- ///
- /// Requirements:
- ///
- /// `Li` must not be `NullLit`.
- ///
- /// All literals must be distinct.
- void addClause(ArrayRef<Literal> Literals) {
- // We generate clauses with up to 3 literals in this file.
- assert(!Literals.empty() && Literals.size() <= 3);
- // Contains literals of the simplified clause.
- llvm::SmallVector<Literal> Simplified;
- for (auto L : Literals) {
- assert(L != NullLit &&
- llvm::all_of(Simplified,
- [L](Literal S) { return S != L; }));
- auto X = var(L);
- if (trueVars.contains(X)) { // X must be true
- if (isPosLit(L))
- return; // Omit clause `(... v X v ...)`, it is `true`.
- else
- continue; // Omit `!X` from `(... v !X v ...)`.
- }
- if (falseVars.contains(X)) { // X must be false
- if (isNegLit(L))
- return; // Omit clause `(... v !X v ...)`, it is `true`.
- else
- continue; // Omit `X` from `(... v X v ...)`.
- }
- Simplified.push_back(L);
- }
- if (Simplified.empty()) {
- // Simplification made the clause empty, which is equivalent to `false`.
- // We already know that this formula is unsatisfiable.
- Formula.KnownContradictory = true;
- // We can add any of the input literals to get an unsatisfiable formula.
- Formula.addClause(Literals[0]);
- return;
- }
- if (Simplified.size() == 1) {
- // We have new unit clause.
- const Literal lit = Simplified.front();
- const Variable v = var(lit);
- if (isPosLit(lit))
- trueVars.insert(v);
- else
- falseVars.insert(v);
- }
- Formula.addClause(Simplified);
- }
-
- /// Returns true if we observed a contradiction while adding clauses.
-  /// In that case, the formula is already known to be unsatisfiable.
- bool isKnownContradictory() { return Formula.KnownContradictory; }
-
-private:
- CNFFormula &Formula;
- llvm::DenseSet<Variable> trueVars;
- llvm::DenseSet<Variable> falseVars;
-};
-
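
The simplification rule implemented by `addClause` above is ordinary unit propagation applied at construction time: once a variable is known true (resp. false), any clause containing its positive (resp. negative) literal is already satisfied and can be dropped, and the opposite literal can be deleted from other clauses; a clause that loses all its literals proves the formula unsatisfiable. Below is a standalone sketch of just that rule (illustrative, not part of the patch; it assumes the encoding `posLit(V) == 2*V`, `negLit(V) == 2*V + 1`, consistent with the literal tables of size `2 * LargestVar + 2` elsewhere in this file).

#include <cassert>
#include <cstdint>
#include <unordered_set>
#include <utility>
#include <vector>

using Variable = uint32_t;
using Literal = uint32_t;

static Literal posLit(Variable V) { return 2 * V; }
static Literal negLit(Variable V) { return 2 * V + 1; }
static Variable var(Literal L) { return L / 2; }
static bool isPosLit(Literal L) { return (L & 1) == 0; }

enum class ClauseStatus { Satisfied, Simplified, Empty };

// Simplify Clause in place, given variables already forced true or false.
ClauseStatus simplifyClause(std::vector<Literal> &Clause,
                            const std::unordered_set<Variable> &TrueVars,
                            const std::unordered_set<Variable> &FalseVars) {
  std::vector<Literal> Kept;
  for (Literal L : Clause) {
    Variable X = var(L);
    if (TrueVars.count(X)) {
      if (isPosLit(L))
        return ClauseStatus::Satisfied; // (... v X v ...) is already true
      continue;                         // drop !X
    }
    if (FalseVars.count(X)) {
      if (!isPosLit(L))
        return ClauseStatus::Satisfied; // (... v !X v ...) is already true
      continue;                         // drop X
    }
    Kept.push_back(L);
  }
  Clause = std::move(Kept);
  return Clause.empty() ? ClauseStatus::Empty : ClauseStatus::Simplified;
}

int main() {
  // Suppose variable 1 is already known to be true.
  std::unordered_set<Variable> TrueVars{1}, FalseVars;

  std::vector<Literal> C1{posLit(1), posLit(2)}; // (1 v 2): satisfied, drop it
  std::vector<Literal> C2{negLit(1), posLit(2)}; // (!1 v 2): shrinks to unit (2)
  std::vector<Literal> C3{negLit(1)};            // (!1): empty => contradiction

  assert(simplifyClause(C1, TrueVars, FalseVars) == ClauseStatus::Satisfied);
  assert(simplifyClause(C2, TrueVars, FalseVars) == ClauseStatus::Simplified);
  assert(C2.size() == 1 && C2.front() == posLit(2));
  assert(simplifyClause(C3, TrueVars, FalseVars) == ClauseStatus::Empty);
}
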
-/// Converts the conjunction of `Vals` into a formula in conjunctive normal
-/// form where each clause has at least one and at most three literals.
-CNFFormula buildCNF(const llvm::ArrayRef<const Formula *> &Vals) {
- // The general strategy of the algorithm implemented below is to map each
- // of the sub-values in `Vals` to a unique variable and use these variables in
- // the resulting CNF expression to avoid exponential blow up. The number of
- // literals in the resulting formula is guaranteed to be linear in the number
- // of sub-formulas in `Vals`.
-
- // Map each sub-formula in `Vals` to a unique variable.
- llvm::DenseMap<const Formula *, Variable> SubValsToVar;
-  // Map variable identifiers to the Atom of each atomic boolean.
- llvm::DenseMap<Variable, Atom> Atomics;
- Variable NextVar = 1;
- {
- std::queue<const Formula *> UnprocessedSubVals;
- for (const Formula *Val : Vals)
- UnprocessedSubVals.push(Val);
- while (!UnprocessedSubVals.empty()) {
- Variable Var = NextVar;
- const Formula *Val = UnprocessedSubVals.front();
- UnprocessedSubVals.pop();
-
- if (!SubValsToVar.try_emplace(Val, Var).second)
- continue;
- ++NextVar;
-
- for (const Formula *F : Val->operands())
- UnprocessedSubVals.push(F);
- if (Val->kind() == Formula::AtomRef)
- Atomics[Var] = Val->getAtom();
- }
- }
-
- auto GetVar = [&SubValsToVar](const Formula *Val) {
- auto ValIt = SubValsToVar.find(Val);
- assert(ValIt != SubValsToVar.end());
- return ValIt->second;
- };
-
- CNFFormula CNF(NextVar - 1, std::move(Atomics));
- std::vector<bool> ProcessedSubVals(NextVar, false);
- CNFFormulaBuilder builder(CNF);
-
- // Add a conjunct for each variable that represents a top-level conjunction
- // value in `Vals`.
- for (const Formula *Val : Vals)
- builder.addClause(posLit(GetVar(Val)));
-
- // Add conjuncts that represent the mapping between newly-created variables
- // and their corresponding sub-formulas.
- std::queue<const Formula *> UnprocessedSubVals;
- for (const Formula *Val : Vals)
- UnprocessedSubVals.push(Val);
- while (!UnprocessedSubVals.empty()) {
- const Formula *Val = UnprocessedSubVals.front();
- UnprocessedSubVals.pop();
- const Variable Var = GetVar(Val);
-
- if (ProcessedSubVals[Var])
- continue;
- ProcessedSubVals[Var] = true;
-
- switch (Val->kind()) {
- case Formula::AtomRef:
- break;
- case Formula::Literal:
- CNF.addClause(Val->literal() ? posLit(Var) : negLit(Var));
- break;
- case Formula::And: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- if (LHS == RHS) {
- // `X <=> (A ^ A)` is equivalent to `(!X v A) ^ (X v !A)` which is
- // already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS)});
- builder.addClause({posLit(Var), negLit(LHS)});
- } else {
- // `X <=> (A ^ B)` is equivalent to `(!X v A) ^ (!X v B) ^ (X v !A v
- // !B)` which is already in conjunctive normal form. Below we add each
- // of the conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS)});
- builder.addClause({negLit(Var), posLit(RHS)});
- builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
- }
- break;
- }
- case Formula::Or: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- if (LHS == RHS) {
- // `X <=> (A v A)` is equivalent to `(!X v A) ^ (X v !A)` which is
- // already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS)});
- builder.addClause({posLit(Var), negLit(LHS)});
- } else {
- // `X <=> (A v B)` is equivalent to `(!X v A v B) ^ (X v !A) ^ (X v
- // !B)` which is already in conjunctive normal form. Below we add each
- // of the conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), posLit(LHS), posLit(RHS)});
- builder.addClause({posLit(Var), negLit(LHS)});
- builder.addClause({posLit(Var), negLit(RHS)});
- }
- break;
- }
- case Formula::Not: {
- const Variable Operand = GetVar(Val->operands()[0]);
-
- // `X <=> !Y` is equivalent to `(!X v !Y) ^ (X v Y)` which is
- // already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({negLit(Var), negLit(Operand)});
- builder.addClause({posLit(Var), posLit(Operand)});
- break;
- }
- case Formula::Implies: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- // `X <=> (A => B)` is equivalent to
- // `(X v A) ^ (X v !B) ^ (!X v !A v B)` which is already in
- // conjunctive normal form. Below we add each of the conjuncts of
- // the latter expression to the result.
- builder.addClause({posLit(Var), posLit(LHS)});
- builder.addClause({posLit(Var), negLit(RHS)});
- builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
- break;
- }
- case Formula::Equal: {
- const Variable LHS = GetVar(Val->operands()[0]);
- const Variable RHS = GetVar(Val->operands()[1]);
-
- if (LHS == RHS) {
- // `X <=> (A <=> A)` is equivalent to `X` which is already in
- // conjunctive normal form. Below we add each of the conjuncts of the
- // latter expression to the result.
- builder.addClause(posLit(Var));
-
- // No need to visit the sub-values of `Val`.
- continue;
- }
- // `X <=> (A <=> B)` is equivalent to
- // `(X v A v B) ^ (X v !A v !B) ^ (!X v A v !B) ^ (!X v !A v B)` which
- // is already in conjunctive normal form. Below we add each of the
- // conjuncts of the latter expression to the result.
- builder.addClause({posLit(Var), posLit(LHS), posLit(RHS)});
- builder.addClause({posLit(Var), negLit(LHS), negLit(RHS)});
- builder.addClause({negLit(Var), posLit(LHS), negLit(RHS)});
- builder.addClause({negLit(Var), negLit(LHS), posLit(RHS)});
- break;
- }
- }
- if (builder.isKnownContradictory()) {
- return CNF;
- }
- for (const Formula *Child : Val->operands())
- UnprocessedSubVals.push(Child);
- }
-
- // Unit clauses that were added later were not
- // considered for the simplification of earlier clauses. Do a final
- // pass to find more opportunities for simplification.
- CNFFormula FinalCNF(NextVar - 1, std::move(CNF.Atomics));
- CNFFormulaBuilder FinalBuilder(FinalCNF);
-
- // Collect unit clauses.
- for (ClauseID C = 1; C < CNF.ClauseStarts.size(); ++C) {
- if (CNF.clauseSize(C) == 1) {
- FinalBuilder.addClause(CNF.clauseLiterals(C)[0]);
- }
- }
-
- // Add all clauses that were added previously, preserving the order.
- for (ClauseID C = 1; C < CNF.ClauseStarts.size(); ++C) {
- FinalBuilder.addClause(CNF.clauseLiterals(C));
- if (FinalBuilder.isKnownContradictory()) {
- break;
- }
- }
-  // New unit clauses may have appeared again, but we stop here and leave
-  // the rest to the solver algorithm.
- return FinalCNF;
-}
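
The switch above is a Tseitin-style transformation: every sub-formula gets a fresh variable X and clauses enforcing `X <=> <sub-formula>` are emitted, so the output grows linearly instead of exponentially. A small worked example of the raw clauses for one formula, before the builder's simplification (illustrative numbering; same assumed literal encoding as above):

// Encode F = A && !B. BFS over sub-formulas assigns, in order:
//   X1 = (A && !B), X2 = A, X3 = !B, X4 = B
// The emitted clauses are:
//   (X1)                                 -- F is asserted at the top level
//   X1 <=> (X2 ^ X3)  =>  (!X1 v X2), (!X1 v X3), (X1 v !X2 v !X3)
//   X3 <=> !X4        =>  (!X3 v !X4), (X3 v X4)
// (Atoms X2 and X4 contribute no clauses of their own.)
#include <cstdint>
#include <vector>

using Literal = uint32_t;
static Literal pos(uint32_t V) { return 2 * V; }
static Literal neg(uint32_t V) { return 2 * V + 1; }

std::vector<std::vector<Literal>> encodeAAndNotB() {
  return {
      {pos(1)},                 // (X1)
      {neg(1), pos(2)},         // (!X1 v X2)
      {neg(1), pos(3)},         // (!X1 v X3)
      {pos(1), neg(2), neg(3)}, // (X1 v !X2 v !X3)
      {neg(3), neg(4)},         // (!X3 v !X4)
      {pos(3), pos(4)},         // (X3 v X4)
  };
}

With the builder's simplification applied, the unit clause (X1) forces X1 true, which shrinks (!X1 v X2) and (!X1 v X3) to the unit clauses (X2) and (X3) and drops the satisfied clauses; (!X3 v !X4) then shrinks to (!X4). The whole example collapses to the units (X1), (X2), (X3), (!X4), i.e. the single model of A && !B.
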
-
-class WatchedLiteralsSolverImpl {
- /// A boolean formula in conjunctive normal form that the solver will attempt
- /// to prove satisfiable. The formula will be modified in the process.
- CNFFormula CNF;
-
/// The search for a satisfying assignment of the variables in `Formula` will
/// proceed in levels, starting from 1 and going up to `Formula.LargestVar`
/// (inclusive). The current level is stored in `Level`. At each level the
@@ -501,20 +105,37 @@ class WatchedLiteralsSolverImpl {
public:
explicit WatchedLiteralsSolverImpl(
const llvm::ArrayRef<const Formula *> &Vals)
- : CNF(buildCNF(Vals)), LevelVars(CNF.LargestVar + 1),
- LevelStates(CNF.LargestVar + 1) {
+ // `Atomics` needs to be initialized first so that we can use it as an
+ // output argument of `buildCNF()`.
+ : Atomics(), CNF(buildCNF(Vals, Atomics)),
+ LevelVars(CNF.largestVar() + 1), LevelStates(CNF.largestVar() + 1) {
assert(!Vals.empty());
+ // Skip initialization if the formula is known to be contradictory.
+ if (CNF.knownContradictory())
+ return;
+
+ // Initialize `NextWatched` and `WatchedHead`.
+ NextWatched.push_back(0);
+ const size_t NumLiterals = 2 * CNF.largestVar() + 1;
+ WatchedHead.resize(NumLiterals + 1, 0);
+ for (ClauseID C = 1; C <= CNF.numClauses(); ++C) {
+ // Designate the first literal as the "watched" literal of the clause.
+ Literal FirstLit = CNF.clauseLiterals(C).front();
+ NextWatched.push_back(WatchedHead[FirstLit]);
+ WatchedHead[FirstLit] = C;
+ }
+
// Initialize the state at the root level to a decision so that in
// `reverseForcedMoves` we don't have to check that `Level >= 0` on each
// iteration.
LevelStates[0] = State::Decision;
// Initialize all variables as unassigned.
- VarAssignments.resize(CNF.LargestVar + 1, Assignment::Unassigned);
+ VarAssignments.resize(CNF.largestVar() + 1, Assignment::Unassigned);
// Initialize the active variables.
- for (Variable Var = CNF.LargestVar; Var != NullVar; --Var) {
+ for (Variable Var = CNF.largestVar(); Var != NullVar; --Var) {
if (isWatched(posLit(Var)) || isWatched(negLit(Var)))
ActiveVars.push_back(Var);
}
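
The loop above rebuilds the watcher lists that used to live inside `CNFFormula`: `WatchedHead[Lit]` is the ID of the most recently added clause watching `Lit`, and `NextWatched[C]` chains to the next clause watching the same literal, so each literal gets an intrusive singly linked list of clause IDs with O(1) insertion. A standalone sketch of that structure (illustrative only; it assumes clause ID 0 means "no clause", matching `NullClause` below):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using Literal = uint32_t;
using ClauseID = uint32_t;
constexpr ClauseID NullClause = 0;

struct WatcherLists {
  std::vector<ClauseID> WatchedHead; // indexed by literal
  std::vector<ClauseID> NextWatched; // indexed by clause ID; slot 0 unused

  WatcherLists(size_t NumLiterals, size_t NumClauses)
      : WatchedHead(NumLiterals + 1, NullClause),
        NextWatched(NumClauses + 1, NullClause) {}

  // Prepend clause C to the list of clauses watching Lit (O(1)).
  void watch(ClauseID C, Literal Lit) {
    NextWatched[C] = WatchedHead[Lit];
    WatchedHead[Lit] = C;
  }

  // Walk the list for Lit, newest watcher first.
  std::vector<ClauseID> watchersOf(Literal Lit) const {
    std::vector<ClauseID> Out;
    for (ClauseID C = WatchedHead[Lit]; C != NullClause; C = NextWatched[C])
      Out.push_back(C);
    return Out;
  }
};

int main() {
  WatcherLists W(/*NumLiterals=*/9, /*NumClauses=*/3);
  W.watch(/*C=*/1, /*Lit=*/4);
  W.watch(/*C=*/3, /*Lit=*/4);
  assert((W.watchersOf(4) == std::vector<ClauseID>{3, 1}));
  assert(W.watchersOf(5).empty());
}

Moving a watcher then amounts to unlinking a clause from one list head and pushing it onto another, which is what the following hunks do once the currently watched literal becomes false.
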
@@ -523,7 +144,7 @@ public:
// Returns the `Result` and the number of iterations "remaining" from
// `MaxIterations` (that is, `MaxIterations` - iterations in this call).
std::pair<Solver::Result, std::int64_t> solve(std::int64_t MaxIterations) && {
- if (CNF.KnownContradictory) {
+ if (CNF.knownContradictory()) {
// Short-cut the solving process. We already found out at CNF
// construction time that the formula is unsatisfiable.
return std::make_pair(Solver::Result::Unsatisfiable(), MaxIterations);
@@ -625,7 +246,7 @@ private:
/// Returns a satisfying truth assignment to the atoms in the boolean formula.
llvm::DenseMap<Atom, Solver::Result::Assignment> buildSolution() {
llvm::DenseMap<Atom, Solver::Result::Assignment> Solution;
- for (auto &Atomic : CNF.Atomics) {
+ for (auto &Atomic : Atomics) {
// A variable may have a definite true/false assignment, or it may be
// unassigned indicating its truth value does not affect the result of
// the formula. Unassigned variables are assigned to true as a default.
@@ -661,24 +282,25 @@ private:
const Literal FalseLit = VarAssignments[Var] == Assignment::AssignedTrue
? negLit(Var)
: posLit(Var);
- ClauseID FalseLitWatcher = CNF.WatchedHead[FalseLit];
- CNF.WatchedHead[FalseLit] = NullClause;
+ ClauseID FalseLitWatcher = WatchedHead[FalseLit];
+ WatchedHead[FalseLit] = NullClause;
while (FalseLitWatcher != NullClause) {
- const ClauseID NextFalseLitWatcher = CNF.NextWatched[FalseLitWatcher];
+ const ClauseID NextFalseLitWatcher = NextWatched[FalseLitWatcher];
// Pick the first non-false literal as the new watched literal.
- const size_t FalseLitWatcherStart = CNF.ClauseStarts[FalseLitWatcher];
- size_t NewWatchedLitIdx = FalseLitWatcherStart + 1;
- while (isCurrentlyFalse(CNF.Clauses[NewWatchedLitIdx]))
- ++NewWatchedLitIdx;
- const Literal NewWatchedLit = CNF.Clauses[NewWatchedLitIdx];
+ const CNFFormula::Iterator FalseLitWatcherStart =
+ CNF.startOfClause(FalseLitWatcher);
+ CNFFormula::Iterator NewWatchedLitIter = FalseLitWatcherStart.next();
+ while (isCurrentlyFalse(*NewWatchedLitIter))
+ ++NewWatchedLitIter;
+ const Literal NewWatchedLit = *NewWatchedLitIter;
const Variable NewWatchedLitVar = var(NewWatchedLit);
// Swap the old watched literal for the new one in `FalseLitWatcher` to
// maintain the invariant that the watched literal is at the beginning of
// the clause.
- CNF.Clauses[NewWatchedLitIdx] = FalseLit;
- CNF.Clauses[FalseLitWatcherStart] = NewWatchedLit;
+ *NewWatchedLitIter = FalseLit;
+ *FalseLitWatcherStart = NewWatchedLit;
// If the new watched literal isn't watched by any other clause and its
// variable isn't assigned we need to add it to the active variables.
@@ -686,8 +308,8 @@ private:
VarAssignments[NewWatchedLitVar] == Assignment::Unassigned)
ActiveVars.push_back(NewWatchedLitVar);
- CNF.NextWatched[FalseLitWatcher] = CNF.WatchedHead[NewWatchedLit];
- CNF.WatchedHead[NewWatchedLit] = FalseLitWatcher;
+ NextWatched[FalseLitWatcher] = WatchedHead[NewWatchedLit];
+ WatchedHead[NewWatchedLit] = FalseLitWatcher;
// Go to the next clause that watches `FalseLit`.
FalseLitWatcher = NextFalseLitWatcher;
@@ -697,8 +319,8 @@ private:
/// Returns true if and only if one of the clauses that watch `Lit` is a unit
/// clause.
bool watchedByUnitClause(Literal Lit) const {
- for (ClauseID LitWatcher = CNF.WatchedHead[Lit]; LitWatcher != NullClause;
- LitWatcher = CNF.NextWatched[LitWatcher]) {
+ for (ClauseID LitWatcher = WatchedHead[Lit]; LitWatcher != NullClause;
+ LitWatcher = NextWatched[LitWatcher]) {
llvm::ArrayRef<Literal> Clause = CNF.clauseLiterals(LitWatcher);
// Assert the invariant that the watched literal is always the first one
@@ -728,9 +350,7 @@ private:
}
/// Returns true if and only if `Lit` is watched by a clause in `Formula`.
- bool isWatched(Literal Lit) const {
- return CNF.WatchedHead[Lit] != NullClause;
- }
+ bool isWatched(Literal Lit) const { return WatchedHead[Lit] != NullClause; }
/// Returns an assignment for an unassigned variable.
Assignment decideAssignment(Variable Var) const {
@@ -742,8 +362,8 @@ private:
/// Returns a set of all watched literals.
llvm::DenseSet<Literal> watchedLiterals() const {
llvm::DenseSet<Literal> WatchedLiterals;
- for (Literal Lit = 2; Lit < CNF.WatchedHead.size(); Lit++) {
- if (CNF.WatchedHead[Lit] == NullClause)
+ for (Literal Lit = 2; Lit < WatchedHead.size(); Lit++) {
+ if (WatchedHead[Lit] == NullClause)
continue;
WatchedLiterals.insert(Lit);
}
@@ -783,6 +403,8 @@ private:
}
};
+} // namespace
+
Solver::Result
WatchedLiteralsSolver::solve(llvm::ArrayRef<const Formula *> Vals) {
if (Vals.empty())
diff --git a/clang/lib/Basic/Targets/Mips.h b/clang/lib/Basic/Targets/Mips.h
index f76c6ece8bf4..b6f110249fa7 100644
--- a/clang/lib/Basic/Targets/Mips.h
+++ b/clang/lib/Basic/Targets/Mips.h
@@ -324,6 +324,7 @@ public:
FPMode = getDefaultFPMode();
bool OddSpregGiven = false;
bool StrictAlign = false;
+ bool FpGiven = false;
for (const auto &Feature : Features) {
if (Feature == "+single-float")
@@ -348,13 +349,16 @@ public:
HasMSA = true;
else if (Feature == "+nomadd4")
DisableMadd4 = true;
- else if (Feature == "+fp64")
+ else if (Feature == "+fp64") {
FPMode = FP64;
- else if (Feature == "-fp64")
+ FpGiven = true;
+ } else if (Feature == "-fp64") {
FPMode = FP32;
- else if (Feature == "+fpxx")
+ FpGiven = true;
+ } else if (Feature == "+fpxx") {
FPMode = FPXX;
- else if (Feature == "+nan2008")
+ FpGiven = true;
+ } else if (Feature == "+nan2008")
IsNan2008 = true;
else if (Feature == "-nan2008")
IsNan2008 = false;
@@ -381,6 +385,11 @@ public:
if (StrictAlign)
HasUnalignedAccess = false;
+ if (HasMSA && !FpGiven) {
+ FPMode = FP64;
+ Features.push_back("+fp64");
+ }
+
setDataLayout();
return true;
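
The net effect of the hunks above: remember whether an FP mode was requested explicitly (`FpGiven`), and when MSA is enabled without any explicit +fp64/-fp64/+fpxx, default the FP mode to FP64 and append +fp64 to the feature list (MSA generally requires 64-bit FP registers). A simplified standalone model of that decision, not Clang's actual TargetInfo code:

#include <cassert>
#include <string>
#include <vector>

enum class FPMode { FPXX, FP32, FP64 };

FPMode selectFPMode(const std::vector<std::string> &Features, FPMode Default) {
  FPMode Mode = Default;
  bool HasMSA = false, FpGiven = false;
  for (const std::string &F : Features) {
    if (F == "+msa") {
      HasMSA = true;
    } else if (F == "+fp64") {
      Mode = FPMode::FP64;
      FpGiven = true;
    } else if (F == "-fp64") {
      Mode = FPMode::FP32;
      FpGiven = true;
    } else if (F == "+fpxx") {
      Mode = FPMode::FPXX;
      FpGiven = true;
    }
  }
  // New behavior: with MSA enabled and no explicit FP choice, use FP64.
  if (HasMSA && !FpGiven)
    Mode = FPMode::FP64;
  return Mode;
}

int main() {
  assert(selectFPMode({"+msa"}, FPMode::FPXX) == FPMode::FP64);          // MSA now implies FP64
  assert(selectFPMode({"+msa", "-fp64"}, FPMode::FPXX) == FPMode::FP32); // explicit choice wins
  assert(selectFPMode({}, FPMode::FPXX) == FPMode::FPXX);                // otherwise unchanged
}
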
diff --git a/clang/lib/CodeGen/CGAtomic.cpp b/clang/lib/CodeGen/CGAtomic.cpp
index 07452b18a85e..fbf942d06ca6 100644
--- a/clang/lib/CodeGen/CGAtomic.cpp
+++ b/clang/lib/CodeGen/CGAtomic.cpp
@@ -150,7 +150,7 @@ namespace {
Address getAtomicAddress() const {
llvm::Type *ElTy;
if (LVal.isSimple())
- ElTy = LVal.getAddress(CGF).getElementType();
+ ElTy = LVal.getAddress().getElementType();
else if (LVal.isBitField())
ElTy = LVal.getBitFieldAddress().getElementType();
else if (LVal.isVectorElt())
@@ -363,7 +363,7 @@ bool AtomicInfo::requiresMemSetZero(llvm::Type *type) const {
bool AtomicInfo::emitMemSetZeroIfNecessary() const {
assert(LVal.isSimple());
- Address addr = LVal.getAddress(CGF);
+ Address addr = LVal.getAddress();
if (!requiresMemSetZero(addr.getElementType()))
return false;
@@ -1603,7 +1603,7 @@ Address AtomicInfo::materializeRValue(RValue rvalue) const {
LValue TempLV = CGF.MakeAddrLValue(CreateTempAlloca(), getAtomicType());
AtomicInfo Atomics(CGF, TempLV);
Atomics.emitCopyIntoMemory(rvalue);
- return TempLV.getAddress(CGF);
+ return TempLV.getAddress();
}
llvm::Value *AtomicInfo::getScalarRValValueOrNull(RValue RVal) const {
@@ -1951,7 +1951,7 @@ void CodeGenFunction::EmitAtomicStore(RValue rvalue, LValue dest,
// maybe for address-space qualification.
assert(!rvalue.isAggregate() ||
rvalue.getAggregateAddress().getElementType() ==
- dest.getAddress(*this).getElementType());
+ dest.getAddress().getElementType());
AtomicInfo atomics(*this, dest);
LValue LVal = atomics.getAtomicLValue();
@@ -2024,10 +2024,10 @@ std::pair<RValue, llvm::Value *> CodeGenFunction::EmitAtomicCompareExchange(
// maybe for address-space qualification.
assert(!Expected.isAggregate() ||
Expected.getAggregateAddress().getElementType() ==
- Obj.getAddress(*this).getElementType());
+ Obj.getAddress().getElementType());
assert(!Desired.isAggregate() ||
Desired.getAggregateAddress().getElementType() ==
- Obj.getAddress(*this).getElementType());
+ Obj.getAddress().getElementType());
AtomicInfo Atomics(*this, Obj);
return Atomics.EmitAtomicCompareExchange(Expected, Desired, Success, Failure,
@@ -2068,7 +2068,7 @@ void CodeGenFunction::EmitAtomicInit(Expr *init, LValue dest) {
// Evaluate the expression directly into the destination.
AggValueSlot slot = AggValueSlot::forLValue(
- dest, *this, AggValueSlot::IsNotDestructed,
+ dest, AggValueSlot::IsNotDestructed,
AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap,
Zeroed ? AggValueSlot::IsZeroed : AggValueSlot::IsNotZeroed);
diff --git a/clang/lib/CodeGen/CGBlocks.cpp b/clang/lib/CodeGen/CGBlocks.cpp
index 2742c39965b2..bf50f2025de5 100644
--- a/clang/lib/CodeGen/CGBlocks.cpp
+++ b/clang/lib/CodeGen/CGBlocks.cpp
@@ -927,7 +927,7 @@ llvm::Value *CodeGenFunction::EmitBlockLiteral(const CGBlockInfo &blockInfo) {
/*RefersToEnclosingVariableOrCapture*/ CI.isNested(),
type.getNonReferenceType(), VK_LValue,
SourceLocation());
- src = EmitDeclRefLValue(&declRef).getAddress(*this);
+ src = EmitDeclRefLValue(&declRef).getAddress();
};
// For byrefs, we just write the pointer to the byref struct into
diff --git a/clang/lib/CodeGen/CGBuiltin.cpp b/clang/lib/CodeGen/CGBuiltin.cpp
index e251091c6ce3..ba94bf89e475 100644
--- a/clang/lib/CodeGen/CGBuiltin.cpp
+++ b/clang/lib/CodeGen/CGBuiltin.cpp
@@ -5609,8 +5609,8 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Value *Queue = EmitScalarExpr(E->getArg(0));
llvm::Value *Flags = EmitScalarExpr(E->getArg(1));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(2));
- llvm::Value *Range = NDRangeL.getAddress(*this).emitRawPointer(*this);
- llvm::Type *RangeTy = NDRangeL.getAddress(*this).getType();
+ llvm::Value *Range = NDRangeL.getAddress().emitRawPointer(*this);
+ llvm::Type *RangeTy = NDRangeL.getAddress().getType();
if (NumArgs == 4) {
// The most basic form of the call with parameters:
@@ -5629,7 +5629,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
Builder.CreatePointerCast(Info.BlockArg, GenericVoidPtrTy);
AttrBuilder B(Builder.getContext());
- B.addByValAttr(NDRangeL.getAddress(*this).getElementType());
+ B.addByValAttr(NDRangeL.getAddress().getElementType());
llvm::AttributeList ByValAttrSet =
llvm::AttributeList::get(CGM.getModule().getContext(), 3U, B);
@@ -5817,7 +5817,7 @@ RValue CodeGenFunction::EmitBuiltinExpr(const GlobalDecl GD, unsigned BuiltinID,
llvm::Type *GenericVoidPtrTy = Builder.getPtrTy(
getContext().getTargetAddressSpace(LangAS::opencl_generic));
LValue NDRangeL = EmitAggExprToLValue(E->getArg(0));
- llvm::Value *NDRange = NDRangeL.getAddress(*this).emitRawPointer(*this);
+ llvm::Value *NDRange = NDRangeL.getAddress().emitRawPointer(*this);
auto Info =
CGM.getOpenCLRuntime().emitOpenCLEnqueuedBlock(*this, E->getArg(1));
Value *Kernel =
@@ -21592,7 +21592,7 @@ Value *CodeGenFunction::EmitRISCVBuiltinExpr(unsigned BuiltinID,
// Handle aggregate argument, namely RVV tuple types in segment load/store
if (hasAggregateEvaluationKind(E->getArg(i)->getType())) {
LValue L = EmitAggExprToLValue(E->getArg(i));
- llvm::Value *AggValue = Builder.CreateLoad(L.getAddress(*this));
+ llvm::Value *AggValue = Builder.CreateLoad(L.getAddress());
Ops.push_back(AggValue);
continue;
}
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index 1b4ca2a8b2fe..97449a5e51e7 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1051,12 +1051,12 @@ void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
forConstantArrayExpansion(
- *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
+ *this, CAExp, LV.getAddress(), [&](Address EltAddr) {
LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
});
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- Address This = LV.getAddress(*this);
+ Address This = LV.getAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
Address Base =
@@ -1088,7 +1088,7 @@ void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
// pointer type they use (see D118744). Once clang uses opaque pointers
// all LLVM pointer types will be the same and we can remove this check.
if (Arg->getType()->isPointerTy()) {
- Address Addr = LV.getAddress(*this);
+ Address Addr = LV.getAddress();
Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
}
EmitStoreOfScalar(Arg, LV);
@@ -1101,7 +1101,7 @@ void CodeGenFunction::ExpandTypeToArgs(
SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
auto Exp = getTypeExpansion(Ty, getContext());
if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
- Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
+ Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
: Arg.getKnownRValue().getAggregateAddress();
forConstantArrayExpansion(
*this, CAExp, Addr, [&](Address EltAddr) {
@@ -1112,7 +1112,7 @@ void CodeGenFunction::ExpandTypeToArgs(
IRCallArgPos);
});
} else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
- Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
+ Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress()
: Arg.getKnownRValue().getAggregateAddress();
for (const CXXBaseSpecifier *BS : RExp->Bases) {
// Perform a single step derived-to-base conversion.
@@ -4136,7 +4136,7 @@ static bool isProvablyNonNull(Address Addr, CodeGenFunction &CGF) {
static void emitWriteback(CodeGenFunction &CGF,
const CallArgList::Writeback &writeback) {
const LValue &srcLV = writeback.Source;
- Address srcAddr = srcLV.getAddress(CGF);
+ Address srcAddr = srcLV.getAddress();
assert(!isProvablyNull(srcAddr.getBasePointer()) &&
"shouldn't have writeback for provably null argument");
@@ -4243,7 +4243,7 @@ static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
}
- Address srcAddr = srcLV.getAddress(CGF);
+ Address srcAddr = srcLV.getAddress();
// The dest and src types don't necessarily match in LLVM terms
// because of the crazy ObjC compatibility rules.
@@ -4649,7 +4649,7 @@ RValue CallArg::getRValue(CodeGenFunction &CGF) const {
CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
LV.isVolatile());
IsUsed = true;
- return RValue::getAggregate(Copy.getAddress(CGF));
+ return RValue::getAggregate(Copy.getAddress());
}
void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
@@ -4659,7 +4659,7 @@ void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
else if (!HasLV && RV.isComplex())
CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
else {
- auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
+ auto Addr = HasLV ? LV.getAddress() : RV.getAggregateAddress();
LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
// We assume that call args are never copied into subobjects.
CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
@@ -5147,7 +5147,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
if (I->isAggregate()) {
RawAddress Addr = I->hasLValue()
- ? I->getKnownLValue().getAddress(*this)
+ ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
llvm::Instruction *Placeholder =
cast<llvm::Instruction>(Addr.getPointer());
@@ -5192,18 +5192,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
case ABIArgInfo::Indirect:
case ABIArgInfo::IndirectAliased: {
assert(NumIRArgs == 1);
- if (!I->isAggregate()) {
- // Make a temporary alloca to pass the argument.
- RawAddress Addr = CreateMemTempWithoutCast(
- I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
-
- llvm::Value *Val = getAsNaturalPointerTo(Addr, I->Ty);
- if (ArgHasMaybeUndefAttr)
- Val = Builder.CreateFreeze(Val);
- IRCallArgs[FirstIRArg] = Val;
-
- I->copyInto(*this, Addr);
- } else {
+ if (I->isAggregate()) {
// We want to avoid creating an unnecessary temporary+copy here;
// however, we need one in three cases:
// 1. If the argument is not byval, and we are required to copy the
@@ -5213,7 +5202,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// 3. If the argument is byval, but RV is not located in default
// or alloca address space.
Address Addr = I->hasLValue()
- ? I->getKnownLValue().getAddress(*this)
+ ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
CharUnits Align = ArgInfo.getIndirectAlign();
const llvm::DataLayout *TD = &CGM.getDataLayout();
@@ -5256,28 +5245,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
}
}
- if (NeedCopy) {
- // Create an aligned temporary, and copy to it.
- RawAddress AI = CreateMemTempWithoutCast(
- I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
- llvm::Value *Val = getAsNaturalPointerTo(AI, I->Ty);
- if (ArgHasMaybeUndefAttr)
- Val = Builder.CreateFreeze(Val);
- IRCallArgs[FirstIRArg] = Val;
-
- // Emit lifetime markers for the temporary alloca.
- llvm::TypeSize ByvalTempElementSize =
- CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
- llvm::Value *LifetimeSize =
- EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
-
- // Add cleanup code to emit the end lifetime marker after the call.
- if (LifetimeSize) // In case we disabled lifetime markers.
- CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
-
- // Generate the copy.
- I->copyInto(*this, AI);
- } else {
+ if (!NeedCopy) {
// Skip the extra memcpy call.
llvm::Value *V = getAsNaturalPointerTo(Addr, I->Ty);
auto *T = llvm::PointerType::get(
@@ -5289,8 +5257,31 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
if (ArgHasMaybeUndefAttr)
Val = Builder.CreateFreeze(Val);
IRCallArgs[FirstIRArg] = Val;
+ break;
}
}
+
+      // For non-aggregate args and aggregate args meeting the conditions
+      // above, we need to create an aligned temporary and copy into it.
+ RawAddress AI = CreateMemTempWithoutCast(
+ I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
+ llvm::Value *Val = getAsNaturalPointerTo(AI, I->Ty);
+ if (ArgHasMaybeUndefAttr)
+ Val = Builder.CreateFreeze(Val);
+ IRCallArgs[FirstIRArg] = Val;
+
+ // Emit lifetime markers for the temporary alloca.
+ llvm::TypeSize ByvalTempElementSize =
+ CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
+ llvm::Value *LifetimeSize =
+ EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());
+
+ // Add cleanup code to emit the end lifetime marker after the call.
+ if (LifetimeSize) // In case we disabled lifetime markers.
+ CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);
+
+ // Generate the copy.
+ I->copyInto(*this, AI);
break;
}
@@ -5309,7 +5300,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
V = I->getKnownRValue().getScalarVal();
else
V = Builder.CreateLoad(
- I->hasLValue() ? I->getKnownLValue().getAddress(*this)
+ I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress());
// Implement swifterror by copying into a new swifterror argument.
@@ -5372,7 +5363,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Src = CreateMemTemp(I->Ty, "coerce");
I->copyInto(*this, Src);
} else {
- Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
+ Src = I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
}
@@ -5459,7 +5450,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
Address addr = Address::invalid();
RawAddress AllocaAddr = RawAddress::invalid();
if (I->isAggregate()) {
- addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
+ addr = I->hasLValue() ? I->getKnownLValue().getAddress()
: I->getKnownRValue().getAggregateAddress();
} else {
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index b3077292f4a2..b8cb78266130 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -680,7 +680,7 @@ static void EmitMemberInitializer(CodeGenFunction &CGF,
// the constructor.
QualType::DestructionKind dtorKind = FieldType.isDestructedType();
if (CGF.needsEHCleanup(dtorKind))
- CGF.pushEHDestroy(dtorKind, LHS.getAddress(CGF), FieldType);
+ CGF.pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
return;
}
}
@@ -705,9 +705,9 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
break;
case TEK_Aggregate: {
AggValueSlot Slot = AggValueSlot::forLValue(
- LHS, *this, AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
- getOverlapForFieldInit(Field), AggValueSlot::IsNotZeroed,
+ LHS, AggValueSlot::IsDestructed, AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, getOverlapForFieldInit(Field),
+ AggValueSlot::IsNotZeroed,
// Checks are made by the code that calls constructor.
AggValueSlot::IsSanitizerChecked);
EmitAggExpr(Init, Slot);
@@ -719,7 +719,7 @@ void CodeGenFunction::EmitInitializerForField(FieldDecl *Field, LValue LHS,
// later in the constructor.
QualType::DestructionKind dtorKind = FieldType.isDestructedType();
if (needsEHCleanup(dtorKind))
- pushEHDestroy(dtorKind, LHS.getAddress(*this), FieldType);
+ pushEHDestroy(dtorKind, LHS.getAddress(), FieldType);
}
/// Checks whether the given constructor is a valid subject for the
@@ -983,8 +983,8 @@ namespace {
LValue Src = CGF.EmitLValueForFieldInitialization(SrcLV, FirstField);
emitMemcpyIR(
- Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(CGF),
- Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(CGF),
+ Dest.isBitField() ? Dest.getBitFieldAddress() : Dest.getAddress(),
+ Src.isBitField() ? Src.getBitFieldAddress() : Src.getAddress(),
MemcpySize);
reset();
}
@@ -1131,7 +1131,7 @@ namespace {
continue;
LValue FieldLHS = LHS;
EmitLValueForAnyFieldInitialization(CGF, MemberInit, FieldLHS);
- CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(CGF), FieldType);
+ CGF.pushEHDestroy(dtorKind, FieldLHS.getAddress(), FieldType);
}
}
@@ -1647,7 +1647,7 @@ namespace {
LValue LV = CGF.EmitLValueForField(ThisLV, field);
assert(LV.isSimple());
- CGF.emitDestroy(LV.getAddress(CGF), field->getType(), destroyer,
+ CGF.emitDestroy(LV.getAddress(), field->getType(), destroyer,
flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
diff --git a/clang/lib/CodeGen/CGDecl.cpp b/clang/lib/CodeGen/CGDecl.cpp
index 9cc67cdbe424..4a213990d1e3 100644
--- a/clang/lib/CodeGen/CGDecl.cpp
+++ b/clang/lib/CodeGen/CGDecl.cpp
@@ -738,18 +738,17 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
LValue srcLV = CGF.EmitLValue(srcExpr);
// Handle a formal type change to avoid asserting.
- auto srcAddr = srcLV.getAddress(CGF);
+ auto srcAddr = srcLV.getAddress();
if (needsCast) {
- srcAddr =
- srcAddr.withElementType(destLV.getAddress(CGF).getElementType());
+ srcAddr = srcAddr.withElementType(destLV.getAddress().getElementType());
}
// If it was an l-value, use objc_copyWeak.
if (srcExpr->isLValue()) {
- CGF.EmitARCCopyWeak(destLV.getAddress(CGF), srcAddr);
+ CGF.EmitARCCopyWeak(destLV.getAddress(), srcAddr);
} else {
assert(srcExpr->isXValue());
- CGF.EmitARCMoveWeak(destLV.getAddress(CGF), srcAddr);
+ CGF.EmitARCMoveWeak(destLV.getAddress(), srcAddr);
}
return true;
}
@@ -767,7 +766,7 @@ static bool tryEmitARCCopyWeakInit(CodeGenFunction &CGF,
static void drillIntoBlockVariable(CodeGenFunction &CGF,
LValue &lvalue,
const VarDecl *var) {
- lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(CGF), var));
+ lvalue.setAddress(CGF.emitBlockByrefAddress(lvalue.getAddress(), var));
}
void CodeGenFunction::EmitNullabilityCheck(LValue LHS, llvm::Value *RHS,
@@ -826,18 +825,17 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
if (capturedByInit) {
// We can use a simple GEP for this because it can't have been
// moved yet.
- tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(*this),
+ tempLV.setAddress(emitBlockByrefAddress(tempLV.getAddress(),
cast<VarDecl>(D),
/*follow*/ false));
}
- auto ty =
- cast<llvm::PointerType>(tempLV.getAddress(*this).getElementType());
+ auto ty = cast<llvm::PointerType>(tempLV.getAddress().getElementType());
llvm::Value *zero = CGM.getNullPointer(ty, tempLV.getType());
// If __weak, we want to use a barrier under certain conditions.
if (lifetime == Qualifiers::OCL_Weak)
- EmitARCInitWeak(tempLV.getAddress(*this), zero);
+ EmitARCInitWeak(tempLV.getAddress(), zero);
// Otherwise just do a simple store.
else
@@ -880,9 +878,9 @@ void CodeGenFunction::EmitScalarInit(const Expr *init, const ValueDecl *D,
if (capturedByInit) drillIntoBlockVariable(*this, lvalue, cast<VarDecl>(D));
if (accessedByInit)
- EmitARCStoreWeak(lvalue.getAddress(*this), value, /*ignored*/ true);
+ EmitARCStoreWeak(lvalue.getAddress(), value, /*ignored*/ true);
else
- EmitARCInitWeak(lvalue.getAddress(*this), value);
+ EmitARCInitWeak(lvalue.getAddress(), value);
return;
}
@@ -1620,7 +1618,7 @@ CodeGenFunction::EmitAutoVarAlloca(const VarDecl &D) {
LValue Base = MakeAddrLValue(AddrSizePair.first, D.getType(),
CGM.getContext().getDeclAlign(&D),
AlignmentSource::Decl);
- address = Base.getAddress(*this);
+ address = Base.getAddress();
// Push a cleanup block to emit the call to __kmpc_free_shared in the
// appropriate location at the end of the scope of the
@@ -2034,10 +2032,10 @@ void CodeGenFunction::EmitExprAsInit(const Expr *init, const ValueDecl *D,
else if (auto *FD = dyn_cast<FieldDecl>(D))
Overlap = getOverlapForFieldInit(FD);
// TODO: how can we delay here if D is captured by its initializer?
- EmitAggExpr(init, AggValueSlot::forLValue(
- lvalue, *this, AggValueSlot::IsDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased, Overlap));
+ EmitAggExpr(init,
+ AggValueSlot::forLValue(lvalue, AggValueSlot::IsDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased, Overlap));
}
return;
}
@@ -2683,7 +2681,7 @@ void CodeGenFunction::EmitParmDecl(const VarDecl &D, ParamValue Arg,
// objc_storeStrong attempts to release its old value.
llvm::Value *Null = CGM.EmitNullConstant(D.getType());
EmitStoreOfScalar(Null, lv, /* isInitialization */ true);
- EmitARCStoreStrongCall(lv.getAddress(*this), ArgVal, true);
+ EmitARCStoreStrongCall(lv.getAddress(), ArgVal, true);
DoStore = false;
}
else
diff --git a/clang/lib/CodeGen/CGDeclCXX.cpp b/clang/lib/CodeGen/CGDeclCXX.cpp
index e08a1e5f42df..b047279912f6 100644
--- a/clang/lib/CodeGen/CGDeclCXX.cpp
+++ b/clang/lib/CodeGen/CGDeclCXX.cpp
@@ -57,7 +57,7 @@ static void EmitDeclInit(CodeGenFunction &CGF, const VarDecl &D,
return;
case TEK_Aggregate:
CGF.EmitAggExpr(Init,
- AggValueSlot::forLValue(lv, CGF, AggValueSlot::IsDestructed,
+ AggValueSlot::forLValue(lv, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap));
diff --git a/clang/lib/CodeGen/CGException.cpp b/clang/lib/CodeGen/CGException.cpp
index 8acda3f2eb86..bb2ed237ee9f 100644
--- a/clang/lib/CodeGen/CGException.cpp
+++ b/clang/lib/CodeGen/CGException.cpp
@@ -1989,8 +1989,7 @@ void CodeGenFunction::EmitCapturedLocals(CodeGenFunction &ParentCGF,
LValue ThisFieldLValue =
EmitLValueForLambdaField(LambdaThisCaptureField);
if (!LambdaThisCaptureField->getType()->isPointerType()) {
- CXXThisValue =
- ThisFieldLValue.getAddress(*this).emitRawPointer(*this);
+ CXXThisValue = ThisFieldLValue.getAddress().emitRawPointer(*this);
} else {
CXXThisValue = EmitLoadOfLValue(ThisFieldLValue, SourceLocation())
.getScalarVal();
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index d96c7bb1e568..cd1c48b42038 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -605,7 +605,7 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
LV = EmitLValueForField(LV, Adjustment.Field);
assert(LV.isSimple() &&
"materialized temporary field is not a simple lvalue");
- Object = LV.getAddress(*this);
+ Object = LV.getAddress();
break;
}
@@ -1123,7 +1123,7 @@ llvm::Value *CodeGenFunction::EmitCountedByFieldExpr(
getPointerAlign(), "dre.load");
} else if (const MemberExpr *ME = dyn_cast<MemberExpr>(StructBase)) {
LValue LV = EmitMemberExpr(ME);
- Address Addr = LV.getAddress(*this);
+ Address Addr = LV.getAddress();
Res = Addr.emitRawPointer(*this);
} else if (StructBase->getType()->isPointerType()) {
LValueBaseInfo BaseInfo;
@@ -1353,7 +1353,7 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
LValue LV = CGF.EmitLValue(UO->getSubExpr(), IsKnownNonNull);
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
- return LV.getAddress(CGF);
+ return LV.getAddress();
}
}
@@ -1368,7 +1368,7 @@ static Address EmitPointerWithAlignment(const Expr *E, LValueBaseInfo *BaseInfo,
LValue LV = CGF.EmitLValue(Call->getArg(0), IsKnownNonNull);
if (BaseInfo) *BaseInfo = LV.getBaseInfo();
if (TBAAInfo) *TBAAInfo = LV.getTBAAInfo();
- return LV.getAddress(CGF);
+ return LV.getAddress();
}
}
}
@@ -1590,7 +1590,7 @@ LValue CodeGenFunction::EmitLValueHelper(const Expr *E,
if (LV.isSimple()) {
// Defend against branches out of gnu statement expressions surrounded by
// cleanups.
- Address Addr = LV.getAddress(*this);
+ Address Addr = LV.getAddress();
llvm::Value *V = Addr.getBasePointer();
Scope.ForceCleanup({&V});
Addr.replaceBasePointer(V);
@@ -1839,7 +1839,7 @@ llvm::Value *CodeGenFunction::emitScalarConstant(
llvm::Value *CodeGenFunction::EmitLoadOfScalar(LValue lvalue,
SourceLocation Loc) {
- return EmitLoadOfScalar(lvalue.getAddress(*this), lvalue.isVolatile(),
+ return EmitLoadOfScalar(lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getType(), Loc, lvalue.getBaseInfo(),
lvalue.getTBAAInfo(), lvalue.isNontemporal());
}
@@ -2076,7 +2076,7 @@ static RawAddress MaybeConvertMatrixAddress(RawAddress Addr,
// (VectorType).
static void EmitStoreOfMatrixScalar(llvm::Value *value, LValue lvalue,
bool isInit, CodeGenFunction &CGF) {
- Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(CGF), CGF,
+ Address Addr = MaybeConvertMatrixAddress(lvalue.getAddress(), CGF,
value->getType()->isVectorTy());
CGF.EmitStoreOfScalar(value, Addr, lvalue.isVolatile(), lvalue.getType(),
lvalue.getBaseInfo(), lvalue.getTBAAInfo(), isInit,
@@ -2146,7 +2146,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
return;
}
- EmitStoreOfScalar(value, lvalue.getAddress(*this), lvalue.isVolatile(),
+ EmitStoreOfScalar(value, lvalue.getAddress(), lvalue.isVolatile(),
lvalue.getType(), lvalue.getBaseInfo(),
lvalue.getTBAAInfo(), isInit, lvalue.isNontemporal());
}
@@ -2156,7 +2156,7 @@ void CodeGenFunction::EmitStoreOfScalar(llvm::Value *value, LValue lvalue,
static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
CodeGenFunction &CGF) {
assert(LV.getType()->isConstantMatrixType());
- Address Addr = MaybeConvertMatrixAddress(LV.getAddress(CGF), CGF);
+ Address Addr = MaybeConvertMatrixAddress(LV.getAddress(), CGF);
LV.setAddress(Addr);
return RValue::get(CGF.EmitLoadOfScalar(LV, Loc));
}
@@ -2167,18 +2167,18 @@ static RValue EmitLoadOfMatrixLValue(LValue LV, SourceLocation Loc,
RValue CodeGenFunction::EmitLoadOfLValue(LValue LV, SourceLocation Loc) {
if (LV.isObjCWeak()) {
// load of a __weak object.
- Address AddrWeakObj = LV.getAddress(*this);
+ Address AddrWeakObj = LV.getAddress();
return RValue::get(CGM.getObjCRuntime().EmitObjCWeakRead(*this,
AddrWeakObj));
}
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
// In MRC mode, we do a load+autorelease.
if (!getLangOpts().ObjCAutoRefCount) {
- return RValue::get(EmitARCLoadWeak(LV.getAddress(*this)));
+ return RValue::get(EmitARCLoadWeak(LV.getAddress()));
}
// In ARC mode, we load retained and then consume the value.
- llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress(*this));
+ llvm::Value *Object = EmitARCLoadWeakRetained(LV.getAddress());
Object = EmitObjCConsumeObject(LV.getType(), Object);
return RValue::get(Object);
}
@@ -2413,9 +2413,9 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
case Qualifiers::OCL_Weak:
if (isInit)
// Initialize and then skip the primitive store.
- EmitARCInitWeak(Dst.getAddress(*this), Src.getScalarVal());
+ EmitARCInitWeak(Dst.getAddress(), Src.getScalarVal());
else
- EmitARCStoreWeak(Dst.getAddress(*this), Src.getScalarVal(),
+ EmitARCStoreWeak(Dst.getAddress(), Src.getScalarVal(),
/*ignore*/ true);
return;
@@ -2429,7 +2429,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isObjCWeak() && !Dst.isNonGC()) {
// load of a __weak object.
- Address LvalueDst = Dst.getAddress(*this);
+ Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
CGM.getObjCRuntime().EmitObjCWeakAssign(*this, src, LvalueDst);
return;
@@ -2437,7 +2437,7 @@ void CodeGenFunction::EmitStoreThroughLValue(RValue Src, LValue Dst,
if (Dst.isObjCStrong() && !Dst.isNonGC()) {
// load of a __strong object.
- Address LvalueDst = Dst.getAddress(*this);
+ Address LvalueDst = Dst.getAddress();
llvm::Value *src = Src.getScalarVal();
if (Dst.isObjCIvar()) {
assert(Dst.getBaseIvarExp() && "BaseIvarExp is NULL");
@@ -2777,7 +2777,7 @@ CodeGenFunction::EmitLoadOfReference(LValue RefLVal,
LValueBaseInfo *PointeeBaseInfo,
TBAAAccessInfo *PointeeTBAAInfo) {
llvm::LoadInst *Load =
- Builder.CreateLoad(RefLVal.getAddress(*this), RefLVal.isVolatile());
+ Builder.CreateLoad(RefLVal.getAddress(), RefLVal.isVolatile());
CGM.DecorateInstructionWithTBAA(Load, RefLVal.getTBAAInfo());
return makeNaturalAddressForPointer(Load, RefLVal.getType()->getPointeeType(),
CharUnits(), /*ForPointeeType=*/true,
@@ -3027,7 +3027,7 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
LValue CapLVal =
EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
CapturedStmtInfo->getContextValue());
- Address LValueAddress = CapLVal.getAddress(*this);
+ Address LValueAddress = CapLVal.getAddress();
CapLVal = MakeAddrLValue(Address(LValueAddress.emitRawPointer(*this),
LValueAddress.getElementType(),
getContext().getDeclAlign(VD)),
@@ -3217,7 +3217,7 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
// __real is valid on scalars. This is a faster way of testing that.
// __imag can only produce an rvalue on scalars.
if (E->getOpcode() == UO_Real &&
- !LV.getAddress(*this).getElementType()->isStructTy()) {
+ !LV.getAddress().getElementType()->isStructTy()) {
assert(E->getSubExpr()->getType()->isArithmeticType());
return LV;
}
@@ -3226,8 +3226,8 @@ LValue CodeGenFunction::EmitUnaryOpLValue(const UnaryOperator *E) {
Address Component =
(E->getOpcode() == UO_Real
- ? emitAddrOfRealComponent(LV.getAddress(*this), LV.getType())
- : emitAddrOfImagComponent(LV.getAddress(*this), LV.getType()));
+ ? emitAddrOfRealComponent(LV.getAddress(), LV.getType())
+ : emitAddrOfImagComponent(LV.getAddress(), LV.getType()));
LValue ElemLV = MakeAddrLValue(Component, T, LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, T));
ElemLV.getQuals().addQualifiers(LV.getQuals());
@@ -3882,7 +3882,7 @@ Address CodeGenFunction::EmitArrayToPointerDecay(const Expr *E,
// Expressions of array type can't be bitfields or vector elements.
LValue LV = EmitLValue(E);
- Address Addr = LV.getAddress(*this);
+ Address Addr = LV.getAddress();
// If the array type was an incomplete type, we need to make sure
// the decay ends up being the right type.
@@ -4186,9 +4186,8 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
LValue LHS = EmitLValue(E->getBase());
auto *Idx = EmitIdxAfterBase(/*Promote*/false);
assert(LHS.isSimple() && "Can only subscript lvalue vectors here!");
- return LValue::MakeVectorElt(LHS.getAddress(*this), Idx,
- E->getBase()->getType(), LHS.getBaseInfo(),
- TBAAAccessInfo());
+ return LValue::MakeVectorElt(LHS.getAddress(), Idx, E->getBase()->getType(),
+ LHS.getBaseInfo(), TBAAAccessInfo());
}
// All the other cases basically behave like simple offsetting.
@@ -4300,7 +4299,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// Create a GEP with a byte offset between the FAM and count and
// use that to load the count value.
Addr = Builder.CreatePointerBitCastOrAddrSpaceCast(
- ArrayLV.getAddress(*this), Int8PtrTy, Int8Ty);
+ ArrayLV.getAddress(), Int8PtrTy, Int8Ty);
llvm::Type *CountTy = ConvertType(CountFD->getType());
llvm::Value *Res = Builder.CreateInBoundsGEP(
@@ -4320,7 +4319,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
// Propagate the alignment from the array itself to the result.
QualType arrayType = Array->getType();
Addr = emitArraySubscriptGEP(
- *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
+ *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
E->getType(), !getLangOpts().isSignedOverflowDefined(), SignedIndices,
E->getExprLoc(), &arrayType, E->getBase());
EltBaseInfo = ArrayLV.getBaseInfo();
@@ -4359,7 +4358,7 @@ LValue CodeGenFunction::EmitMatrixSubscriptExpr(const MatrixSubscriptExpr *E) {
llvm::Value *FinalIdx =
Builder.CreateAdd(Builder.CreateMul(ColIdx, NumRows), RowIdx);
return LValue::MakeMatrixElt(
- MaybeConvertMatrixAddress(Base.getAddress(*this), *this), FinalIdx,
+ MaybeConvertMatrixAddress(Base.getAddress(), *this), FinalIdx,
E->getBase()->getType(), Base.getBaseInfo(), TBAAAccessInfo());
}
@@ -4372,7 +4371,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
if (auto *ASE = dyn_cast<ArraySectionExpr>(Base->IgnoreParenImpCasts())) {
BaseLVal = CGF.EmitArraySectionExpr(ASE, IsLowerBound);
if (BaseTy->isArrayType()) {
- Address Addr = BaseLVal.getAddress(CGF);
+ Address Addr = BaseLVal.getAddress();
BaseInfo = BaseLVal.getBaseInfo();
// If the array type was an incomplete type, we need to make sure
@@ -4396,7 +4395,7 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
BaseInfo.mergeForCast(TypeBaseInfo);
TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
- return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
+ return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress()),
CGF.ConvertTypeForMem(ElTy), Align);
}
return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
@@ -4548,7 +4547,7 @@ LValue CodeGenFunction::EmitArraySectionExpr(const ArraySectionExpr *E,
// Propagate the alignment from the array itself to the result.
EltPtr = emitArraySubscriptGEP(
- *this, ArrayLV.getAddress(*this), {CGM.getSize(CharUnits::Zero()), Idx},
+ *this, ArrayLV.getAddress(), {CGM.getSize(CharUnits::Zero()), Idx},
ResultExprTy, !getLangOpts().isSignedOverflowDefined(),
/*signedIndices=*/false, E->getExprLoc());
BaseInfo = ArrayLV.getBaseInfo();
@@ -4608,7 +4607,7 @@ EmitExtVectorElementExpr(const ExtVectorElementExpr *E) {
if (Base.isSimple()) {
llvm::Constant *CV =
llvm::ConstantDataVector::get(getLLVMContext(), Indices);
- return LValue::MakeExtVectorElt(Base.getAddress(*this), CV, type,
+ return LValue::MakeExtVectorElt(Base.getAddress(), CV, type,
Base.getBaseInfo(), TBAAAccessInfo());
}
assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
@@ -4797,7 +4796,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
field->getType()
.withCVRQualifiers(base.getVRQualifiers())
.isVolatileQualified();
- Address Addr = base.getAddress(*this);
+ Address Addr = base.getAddress();
unsigned Idx = RL.getLLVMFieldNo(field);
const RecordDecl *rec = field->getParent();
if (hasBPFPreserveStaticOffset(rec))
@@ -4873,7 +4872,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
getContext().getTypeSizeInChars(FieldType).getQuantity();
}
- Address addr = base.getAddress(*this);
+ Address addr = base.getAddress();
if (hasBPFPreserveStaticOffset(rec))
addr = wrapWithBPFPreserveStaticOffset(*this, addr);
if (auto *ClassDef = dyn_cast<CXXRecordDecl>(rec)) {
@@ -4960,7 +4959,7 @@ CodeGenFunction::EmitLValueForFieldInitialization(LValue Base,
if (!FieldType->isReferenceType())
return EmitLValueForField(Base, Field);
- Address V = emitAddrOfFieldStorage(*this, Base.getAddress(*this), Field);
+ Address V = emitAddrOfFieldStorage(*this, Base.getAddress(), Field);
// Make sure that the address is pointing to the right type.
llvm::Type *llvmType = ConvertTypeForMem(FieldType);
@@ -5142,8 +5141,8 @@ LValue CodeGenFunction::EmitConditionalOperatorLValue(
return EmitUnsupportedLValue(expr, "conditional operator");
if (Info.LHS && Info.RHS) {
- Address lhsAddr = Info.LHS->getAddress(*this);
- Address rhsAddr = Info.RHS->getAddress(*this);
+ Address lhsAddr = Info.LHS->getAddress();
+ Address rhsAddr = Info.RHS->getAddress();
Address result = mergeAddressesInConditionalExpr(
lhsAddr, rhsAddr, Info.lhsBlock, Info.rhsBlock,
Builder.GetInsertBlock(), expr->getType());
@@ -5232,7 +5231,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
case CK_Dynamic: {
LValue LV = EmitLValue(E->getSubExpr());
- Address V = LV.getAddress(*this);
+ Address V = LV.getAddress();
const auto *DCE = cast<CXXDynamicCastExpr>(E);
return MakeNaturalAlignRawAddrLValue(EmitDynamicCast(V, DCE), E->getType());
}
@@ -5253,7 +5252,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
if (E->changesVolatileQualification())
LV.getQuals() = E->getType().getQualifiers();
if (LV.isSimple()) {
- Address V = LV.getAddress(*this);
+ Address V = LV.getAddress();
if (V.isValid()) {
llvm::Type *T = ConvertTypeForMem(E->getType());
if (V.getElementType() != T)
@@ -5270,7 +5269,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
auto *DerivedClassDecl = cast<CXXRecordDecl>(DerivedClassTy->getDecl());
LValue LV = EmitLValue(E->getSubExpr());
- Address This = LV.getAddress(*this);
+ Address This = LV.getAddress();
// Perform the derived-to-base conversion
Address Base = GetAddressOfBaseClass(
@@ -5293,7 +5292,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
// Perform the base-to-derived conversion
Address Derived = GetAddressOfDerivedClass(
- LV.getAddress(*this), DerivedClassDecl, E->path_begin(), E->path_end(),
+ LV.getAddress(), DerivedClassDecl, E->path_begin(), E->path_end(),
/*NullCheckValue=*/false);
// C++11 [expr.static.cast]p2: Behavior is undefined if a downcast is
@@ -5316,7 +5315,7 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
CGM.EmitExplicitCastExprType(CE, this);
LValue LV = EmitLValue(E->getSubExpr());
- Address V = LV.getAddress(*this).withElementType(
+ Address V = LV.getAddress().withElementType(
ConvertTypeForMem(CE->getTypeAsWritten()->getPointeeType()));
if (SanOpts.has(SanitizerKind::CFIUnrelatedCast))
@@ -5335,12 +5334,12 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
E->getSubExpr()->getType().getAddressSpace(),
E->getType().getAddressSpace(), ConvertType(DestTy));
return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
- LV.getAddress(*this).getAlignment()),
+ LV.getAddress().getAlignment()),
E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
}
case CK_ObjCObjectLValueCast: {
LValue LV = EmitLValue(E->getSubExpr());
- Address V = LV.getAddress(*this).withElementType(ConvertType(E->getType()));
+ Address V = LV.getAddress().withElementType(ConvertType(E->getType()));
return MakeAddrLValue(V, E->getType(), LV.getBaseInfo(),
CGM.getTBAAInfoForSubobject(LV, E->getType()));
}
@@ -5400,7 +5399,7 @@ RValue CodeGenFunction::EmitRValueForField(LValue LV,
case TEK_Complex:
return RValue::getComplex(EmitLoadOfComplex(FieldLV, Loc));
case TEK_Aggregate:
- return FieldLV.asAggregateRValue(*this);
+ return FieldLV.asAggregateRValue();
case TEK_Scalar:
// This routine is used to load fields one-by-one to perform a copy, so
// don't load reference fields.
@@ -6022,7 +6021,7 @@ EmitPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
if (E->getOpcode() == BO_PtrMemI) {
BaseAddr = EmitPointerWithAlignment(E->getLHS());
} else {
- BaseAddr = EmitLValue(E->getLHS()).getAddress(*this);
+ BaseAddr = EmitLValue(E->getLHS()).getAddress();
}
llvm::Value *OffsetV = EmitScalarExpr(E->getRHS());
@@ -6047,7 +6046,7 @@ RValue CodeGenFunction::convertTempToRValue(Address addr,
case TEK_Complex:
return RValue::getComplex(EmitLoadOfComplex(lvalue, loc));
case TEK_Aggregate:
- return lvalue.asAggregateRValue(*this);
+ return lvalue.asAggregateRValue();
case TEK_Scalar:
return RValue::get(EmitLoadOfScalar(lvalue, loc));
}
diff --git a/clang/lib/CodeGen/CGExprAgg.cpp b/clang/lib/CodeGen/CGExprAgg.cpp
index 6172eb9cdc1b..bba00257fd4f 100644
--- a/clang/lib/CodeGen/CGExprAgg.cpp
+++ b/clang/lib/CodeGen/CGExprAgg.cpp
@@ -384,8 +384,8 @@ void AggExprEmitter::EmitFinalDestCopy(QualType type, const LValue &src,
}
AggValueSlot srcAgg = AggValueSlot::forLValue(
- src, CGF, AggValueSlot::IsDestructed, needsGC(type),
- AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
+ src, AggValueSlot::IsDestructed, needsGC(type), AggValueSlot::IsAliased,
+ AggValueSlot::MayOverlap);
EmitCopy(type, Dest, srcAgg);
}
@@ -423,7 +423,7 @@ AggExprEmitter::VisitCXXStdInitializerListExpr(CXXStdInitializerListExpr *E) {
ASTContext &Ctx = CGF.getContext();
LValue Array = CGF.EmitLValue(E->getSubExpr());
assert(Array.isSimple() && "initializer_list array not a simple lvalue");
- Address ArrayPtr = Array.getAddress(CGF);
+ Address ArrayPtr = Array.getAddress();
const ConstantArrayType *ArrayType =
Ctx.getAsConstantArrayType(E->getSubExpr()->getType());
@@ -747,7 +747,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
CodeGenFunction::TCK_Load);
// FIXME: Do we also need to handle property references here?
if (LV.isSimple())
- CGF.EmitDynamicCast(LV.getAddress(CGF), cast<CXXDynamicCastExpr>(E));
+ CGF.EmitDynamicCast(LV.getAddress(), cast<CXXDynamicCastExpr>(E));
else
CGF.CGM.ErrorUnsupported(E, "non-simple lvalue dynamic_cast");
@@ -780,8 +780,7 @@ void AggExprEmitter::VisitCastExpr(CastExpr *E) {
}
LValue SourceLV = CGF.EmitLValue(E->getSubExpr());
- Address SourceAddress =
- SourceLV.getAddress(CGF).withElementType(CGF.Int8Ty);
+ Address SourceAddress = SourceLV.getAddress().withElementType(CGF.Int8Ty);
Address DestAddress = Dest.getAddress().withElementType(CGF.Int8Ty);
llvm::Value *SizeVal = llvm::ConstantInt::get(
CGF.SizeTy,
@@ -1231,7 +1230,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
}
EmitCopy(E->getLHS()->getType(),
- AggValueSlot::forLValue(LHS, CGF, AggValueSlot::IsDestructed,
+ AggValueSlot::forLValue(LHS, AggValueSlot::IsDestructed,
needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased,
AggValueSlot::MayOverlap),
@@ -1253,7 +1252,7 @@ void AggExprEmitter::VisitBinAssign(const BinaryOperator *E) {
// Codegen the RHS so that it stores directly into the LHS.
AggValueSlot LHSSlot = AggValueSlot::forLValue(
- LHS, CGF, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
+ LHS, AggValueSlot::IsDestructed, needsGC(E->getLHS()->getType()),
AggValueSlot::IsAliased, AggValueSlot::MayOverlap);
// A non-volatile aggregate destination might have volatile member.
if (!LHSSlot.isVolatile() &&
@@ -1400,9 +1399,9 @@ AggExprEmitter::VisitLambdaExpr(LambdaExpr *E) {
CurField->getType().isDestructedType()) {
assert(LV.isSimple());
if (DtorKind)
- CGF.pushDestroyAndDeferDeactivation(
- NormalAndEHCleanup, LV.getAddress(CGF), CurField->getType(),
- CGF.getDestroyer(DtorKind), false);
+ CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
+ CurField->getType(),
+ CGF.getDestroyer(DtorKind), false);
}
}
}
@@ -1580,7 +1579,7 @@ AggExprEmitter::EmitInitializationToLValue(Expr *E, LValue LV) {
return;
case TEK_Aggregate:
CGF.EmitAggExpr(
- E, AggValueSlot::forLValue(LV, CGF, AggValueSlot::IsDestructed,
+ E, AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::MayOverlap, Dest.isZeroed()));
@@ -1619,7 +1618,7 @@ void AggExprEmitter::EmitNullInitializationToLValue(LValue lv) {
// There's a potential optimization opportunity in combining
// memsets; that would be easy for arrays, but relatively
// difficult for structures with the current code.
- CGF.EmitNullInitialization(lv.getAddress(CGF), lv.getType());
+ CGF.EmitNullInitialization(lv.getAddress(), lv.getType());
}
}
@@ -1795,9 +1794,9 @@ void AggExprEmitter::VisitCXXParenListOrInitListExpr(
= field->getType().isDestructedType()) {
assert(LV.isSimple());
if (dtorKind) {
- CGF.pushDestroyAndDeferDeactivation(
- NormalAndEHCleanup, LV.getAddress(CGF), field->getType(),
- CGF.getDestroyer(dtorKind), false);
+ CGF.pushDestroyAndDeferDeactivation(NormalAndEHCleanup, LV.getAddress(),
+ field->getType(),
+ CGF.getDestroyer(dtorKind), false);
pushedCleanup = true;
}
}
@@ -1880,7 +1879,7 @@ void AggExprEmitter::VisitArrayInitLoopExpr(const ArrayInitLoopExpr *E,
if (InnerLoop) {
// If the subexpression is an ArrayInitLoopExpr, share its cleanup.
auto elementSlot = AggValueSlot::forLValue(
- elementLV, CGF, AggValueSlot::IsDestructed,
+ elementLV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers, AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap);
AggExprEmitter(CGF, elementSlot, false)
@@ -2045,10 +2044,10 @@ LValue CodeGenFunction::EmitAggExprToLValue(const Expr *E) {
assert(hasAggregateEvaluationKind(E->getType()) && "Invalid argument!");
Address Temp = CreateMemTemp(E->getType());
LValue LV = MakeAddrLValue(Temp, E->getType());
- EmitAggExpr(E, AggValueSlot::forLValue(
- LV, *this, AggValueSlot::IsNotDestructed,
- AggValueSlot::DoesNotNeedGCBarriers,
- AggValueSlot::IsNotAliased, AggValueSlot::DoesNotOverlap));
+ EmitAggExpr(E, AggValueSlot::forLValue(LV, AggValueSlot::IsNotDestructed,
+ AggValueSlot::DoesNotNeedGCBarriers,
+ AggValueSlot::IsNotAliased,
+ AggValueSlot::DoesNotOverlap));
return LV;
}
@@ -2097,8 +2096,8 @@ void CodeGenFunction::EmitAggregateCopy(LValue Dest, LValue Src, QualType Ty,
bool isVolatile) {
assert(!Ty->isAnyComplexType() && "Shouldn't happen for complex");
- Address DestPtr = Dest.getAddress(*this);
- Address SrcPtr = Src.getAddress(*this);
+ Address DestPtr = Dest.getAddress();
+ Address SrcPtr = Src.getAddress();
if (getLangOpts().CPlusPlus) {
if (const RecordType *RT = Ty->getAs<RecordType>()) {
diff --git a/clang/lib/CodeGen/CGExprCXX.cpp b/clang/lib/CodeGen/CGExprCXX.cpp
index 0cfdb7effe47..3c4f59fc765f 100644
--- a/clang/lib/CodeGen/CGExprCXX.cpp
+++ b/clang/lib/CodeGen/CGExprCXX.cpp
@@ -142,7 +142,7 @@ RValue CodeGenFunction::EmitCXXPseudoDestructorExpr(
BaseQuals = PTy->getPointeeType().getQualifiers();
} else {
LValue BaseLV = EmitLValue(BaseExpr);
- BaseValue = BaseLV.getAddress(*this);
+ BaseValue = BaseLV.getAddress();
QualType BaseTy = BaseExpr->getType();
BaseQuals = BaseTy.getQualifiers();
}
@@ -298,7 +298,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
/*ImplicitParamTy=*/QualType(), CE, Args, nullptr);
EmitCXXConstructorCall(Ctor, Ctor_Complete, /*ForVirtualBase=*/false,
- /*Delegating=*/false, This.getAddress(*this), Args,
+ /*Delegating=*/false, This.getAddress(), Args,
AggValueSlot::DoesNotOverlap, CE->getExprLoc(),
/*NewPointerIsChecked=*/false);
return RValue::get(nullptr);
@@ -375,7 +375,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
assert(ReturnValue.isNull() && "Destructor shouldn't have return value");
if (UseVirtualCall) {
CGM.getCXXABI().EmitVirtualDestructorCall(*this, Dtor, Dtor_Complete,
- This.getAddress(*this),
+ This.getAddress(),
cast<CXXMemberCallExpr>(CE));
} else {
GlobalDecl GD(Dtor, Dtor_Complete);
@@ -403,14 +403,14 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
CGCallee Callee;
if (UseVirtualCall) {
- Callee = CGCallee::forVirtual(CE, MD, This.getAddress(*this), Ty);
+ Callee = CGCallee::forVirtual(CE, MD, This.getAddress(), Ty);
} else {
if (SanOpts.has(SanitizerKind::CFINVCall) &&
MD->getParent()->isDynamicClass()) {
llvm::Value *VTable;
const CXXRecordDecl *RD;
std::tie(VTable, RD) = CGM.getCXXABI().LoadVTablePtr(
- *this, This.getAddress(*this), CalleeDecl->getParent());
+ *this, This.getAddress(), CalleeDecl->getParent());
EmitVTablePtrCheckForCall(RD, VTable, CFITCK_NVCall, CE->getBeginLoc());
}
@@ -429,7 +429,7 @@ RValue CodeGenFunction::EmitCXXMemberOrOperatorMemberCallExpr(
if (MD->isVirtual()) {
Address NewThisAddr =
CGM.getCXXABI().adjustThisArgumentForVirtualFunctionCall(
- *this, CalleeDecl, This.getAddress(*this), UseVirtualCall);
+ *this, CalleeDecl, This.getAddress(), UseVirtualCall);
This.setAddress(NewThisAddr);
}
@@ -456,7 +456,7 @@ CodeGenFunction::EmitCXXMemberPointerCallExpr(const CXXMemberCallExpr *E,
if (BO->getOpcode() == BO_PtrMemI)
This = EmitPointerWithAlignment(BaseExpr, nullptr, nullptr, KnownNonNull);
else
- This = EmitLValue(BaseExpr, KnownNonNull).getAddress(*this);
+ This = EmitLValue(BaseExpr, KnownNonNull).getAddress();
EmitTypeCheck(TCK_MemberCall, E->getExprLoc(), This.emitRawPointer(*this),
QualType(MPT->getClass(), 0));
@@ -2178,7 +2178,7 @@ static bool isGLValueFromPointerDeref(const Expr *E) {
static llvm::Value *EmitTypeidFromVTable(CodeGenFunction &CGF, const Expr *E,
llvm::Type *StdTypeInfoPtrTy) {
// Get the vtable pointer.
- Address ThisPtr = CGF.EmitLValue(E).getAddress(CGF);
+ Address ThisPtr = CGF.EmitLValue(E).getAddress();
QualType SrcRecordTy = E->getType();
diff --git a/clang/lib/CodeGen/CGExprComplex.cpp b/clang/lib/CodeGen/CGExprComplex.cpp
index 1facadd82f17..9ef73e36f66f 100644
--- a/clang/lib/CodeGen/CGExprComplex.cpp
+++ b/clang/lib/CodeGen/CGExprComplex.cpp
@@ -434,7 +434,7 @@ ComplexPairTy ComplexExprEmitter::EmitLoadOfLValue(LValue lvalue,
if (lvalue.getType()->isAtomicType())
return CGF.EmitAtomicLoad(lvalue, loc).getComplexVal();
- Address SrcPtr = lvalue.getAddress(CGF);
+ Address SrcPtr = lvalue.getAddress();
bool isVolatile = lvalue.isVolatileQualified();
llvm::Value *Real = nullptr, *Imag = nullptr;
@@ -460,7 +460,7 @@ void ComplexExprEmitter::EmitStoreOfComplex(ComplexPairTy Val, LValue lvalue,
(!isInit && CGF.LValueIsSuitableForInlineAtomic(lvalue)))
return CGF.EmitAtomicStore(RValue::getComplex(Val), lvalue, isInit);
- Address Ptr = lvalue.getAddress(CGF);
+ Address Ptr = lvalue.getAddress();
Address RealPtr = CGF.emitAddrOfRealComponent(Ptr, lvalue.getType());
Address ImagPtr = CGF.emitAddrOfImagComponent(Ptr, lvalue.getType());
@@ -551,14 +551,14 @@ ComplexPairTy ComplexExprEmitter::EmitCast(CastKind CK, Expr *Op,
case CK_LValueBitCast: {
LValue origLV = CGF.EmitLValue(Op);
- Address V = origLV.getAddress(CGF).withElementType(CGF.ConvertType(DestTy));
+ Address V = origLV.getAddress().withElementType(CGF.ConvertType(DestTy));
return EmitLoadOfLValue(CGF.MakeAddrLValue(V, DestTy), Op->getExprLoc());
}
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(Op);
- Address Addr = SourceLVal.getAddress(CGF).withElementType(
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr =
+ SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, Op->getExprLoc());
diff --git a/clang/lib/CodeGen/CGExprScalar.cpp b/clang/lib/CodeGen/CGExprScalar.cpp
index d84531959b50..1b144c178ce9 100644
--- a/clang/lib/CodeGen/CGExprScalar.cpp
+++ b/clang/lib/CodeGen/CGExprScalar.cpp
@@ -2212,7 +2212,7 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueBitCast:
case CK_ObjCObjectLValueCast: {
- Address Addr = EmitLValue(E).getAddress(CGF);
+ Address Addr = EmitLValue(E).getAddress();
Addr = Addr.withElementType(CGF.ConvertTypeForMem(DestTy));
LValue LV = CGF.MakeAddrLValue(Addr, DestTy);
return EmitLoadOfLValue(LV, CE->getExprLoc());
@@ -2220,8 +2220,8 @@ Value *ScalarExprEmitter::VisitCastExpr(CastExpr *CE) {
case CK_LValueToRValueBitCast: {
LValue SourceLVal = CGF.EmitLValue(E);
- Address Addr = SourceLVal.getAddress(CGF).withElementType(
- CGF.ConvertTypeForMem(DestTy));
+ Address Addr =
+ SourceLVal.getAddress().withElementType(CGF.ConvertTypeForMem(DestTy));
LValue DestLV = CGF.MakeAddrLValue(Addr, DestTy);
DestLV.setTBAAInfo(TBAAAccessInfo::getMayAliasInfo());
return EmitLoadOfLValue(DestLV, CE->getExprLoc());
@@ -2772,14 +2772,14 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
if (isInc && type->isBooleanType()) {
llvm::Value *True = CGF.EmitToMemory(Builder.getTrue(), type);
if (isPre) {
- Builder.CreateStore(True, LV.getAddress(CGF), LV.isVolatileQualified())
+ Builder.CreateStore(True, LV.getAddress(), LV.isVolatileQualified())
->setAtomic(llvm::AtomicOrdering::SequentiallyConsistent);
return Builder.getTrue();
}
// For atomic bool increment, we just store true and return it for
// preincrement, do an atomic swap with true for postincrement
return Builder.CreateAtomicRMW(
- llvm::AtomicRMWInst::Xchg, LV.getAddress(CGF), True,
+ llvm::AtomicRMWInst::Xchg, LV.getAddress(), True,
llvm::AtomicOrdering::SequentiallyConsistent);
}
// Special case for atomic increment / decrement on integers, emit
@@ -2797,7 +2797,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = CGF.EmitToMemory(
llvm::ConstantInt::get(ConvertType(type), 1, true), type);
llvm::Value *old =
- Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
+ Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
llvm::AtomicOrdering::SequentiallyConsistent);
return isPre ? Builder.CreateBinOp(op, old, amt) : old;
}
@@ -2810,7 +2810,7 @@ ScalarExprEmitter::EmitScalarPrePostIncDec(const UnaryOperator *E, LValue LV,
llvm::Value *amt = llvm::ConstantFP::get(
VMContext, llvm::APFloat(static_cast<float>(1.0)));
llvm::Value *old =
- Builder.CreateAtomicRMW(aop, LV.getAddress(CGF), amt,
+ Builder.CreateAtomicRMW(aop, LV.getAddress(), amt,
llvm::AtomicOrdering::SequentiallyConsistent);
return isPre ? Builder.CreateBinOp(op, old, amt) : old;
}
@@ -3552,7 +3552,7 @@ LValue ScalarExprEmitter::EmitCompoundAssignLValue(
E->getExprLoc()),
LHSTy);
Value *OldVal = Builder.CreateAtomicRMW(
- AtomicOp, LHSLV.getAddress(CGF), Amt,
+ AtomicOp, LHSLV.getAddress(), Amt,
llvm::AtomicOrdering::SequentiallyConsistent);
// Since operation is atomic, the result type is guaranteed to be the
@@ -4782,7 +4782,7 @@ Value *ScalarExprEmitter::VisitBinAssign(const BinaryOperator *E) {
case Qualifiers::OCL_Weak:
RHS = Visit(E->getRHS());
LHS = EmitCheckedLValue(E->getLHS(), CodeGenFunction::TCK_Store);
- RHS = CGF.EmitARCStoreWeak(LHS.getAddress(CGF), RHS, Ignore);
+ RHS = CGF.EmitARCStoreWeak(LHS.getAddress(), RHS, Ignore);
break;
case Qualifiers::OCL_None:
@@ -5534,7 +5534,7 @@ LValue CodeGenFunction::EmitObjCIsaExpr(const ObjCIsaExpr *E) {
ConvertTypeForMem(BaseExpr->getType()->getPointeeType());
Addr = Address(EmitScalarExpr(BaseExpr), BaseTy, getPointerAlign());
} else {
- Addr = EmitLValue(BaseExpr).getAddress(*this);
+ Addr = EmitLValue(BaseExpr).getAddress();
}
// Cast the address to Class*.
diff --git a/clang/lib/CodeGen/CGNonTrivialStruct.cpp b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
index 8fade0fac21e..6a02e4dbf84d 100644
--- a/clang/lib/CodeGen/CGNonTrivialStruct.cpp
+++ b/clang/lib/CodeGen/CGNonTrivialStruct.cpp
@@ -711,7 +711,7 @@ struct GenMoveConstructor : GenBinaryFunc<GenMoveConstructor, true> {
LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT);
llvm::Value *SrcVal =
CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal();
- CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress(*CGF)), SrcLV);
+ CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV);
CGF->EmitStoreOfScalar(SrcVal, CGF->MakeAddrLValue(Addrs[DstIdx], QT),
/* isInitialization */ true);
}
@@ -774,7 +774,7 @@ struct GenMoveAssignment : GenBinaryFunc<GenMoveAssignment, true> {
LValue SrcLV = CGF->MakeAddrLValue(Addrs[SrcIdx], QT);
llvm::Value *SrcVal =
CGF->EmitLoadOfLValue(SrcLV, SourceLocation()).getScalarVal();
- CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress(*CGF)), SrcLV);
+ CGF->EmitStoreOfScalar(getNullForVariable(SrcLV.getAddress()), SrcLV);
LValue DstLV = CGF->MakeAddrLValue(Addrs[DstIdx], QT);
llvm::Value *DstVal =
CGF->EmitLoadOfLValue(DstLV, SourceLocation()).getScalarVal();
@@ -810,7 +810,7 @@ void CodeGenFunction::destroyNonTrivialCStruct(CodeGenFunction &CGF,
// such structure.
void CodeGenFunction::defaultInitNonTrivialCStructVar(LValue Dst) {
GenDefaultInitialize Gen(getContext());
- Address DstPtr = Dst.getAddress(*this).withElementType(CGM.Int8PtrTy);
+ Address DstPtr = Dst.getAddress().withElementType(CGM.Int8PtrTy);
Gen.setCGF(this);
QualType QT = Dst.getType();
QT = Dst.isVolatile() ? QT.withVolatile() : QT;
@@ -842,7 +842,7 @@ getSpecialFunction(G &&Gen, StringRef FuncName, QualType QT, bool IsVolatile,
// Functions to emit calls to the special functions of a non-trivial C struct.
void CodeGenFunction::callCStructDefaultConstructor(LValue Dst) {
bool IsVolatile = Dst.isVolatile();
- Address DstPtr = Dst.getAddress(*this);
+ Address DstPtr = Dst.getAddress();
QualType QT = Dst.getType();
GenDefaultInitializeFuncName GenName(DstPtr.getAlignment(), getContext());
std::string FuncName = GenName.getName(QT, IsVolatile);
@@ -866,7 +866,7 @@ std::string CodeGenFunction::getNonTrivialDestructorStr(QualType QT,
void CodeGenFunction::callCStructDestructor(LValue Dst) {
bool IsVolatile = Dst.isVolatile();
- Address DstPtr = Dst.getAddress(*this);
+ Address DstPtr = Dst.getAddress();
QualType QT = Dst.getType();
GenDestructorFuncName GenName("__destructor_", DstPtr.getAlignment(),
getContext());
@@ -877,7 +877,7 @@ void CodeGenFunction::callCStructDestructor(LValue Dst) {
void CodeGenFunction::callCStructCopyConstructor(LValue Dst, LValue Src) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<false> GenName("__copy_constructor_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
@@ -891,7 +891,7 @@ void CodeGenFunction::callCStructCopyAssignmentOperator(LValue Dst, LValue Src
) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<false> GenName("__copy_assignment_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
@@ -902,7 +902,7 @@ void CodeGenFunction::callCStructCopyAssignmentOperator(LValue Dst, LValue Src
void CodeGenFunction::callCStructMoveConstructor(LValue Dst, LValue Src) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<true> GenName("__move_constructor_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
@@ -916,7 +916,7 @@ void CodeGenFunction::callCStructMoveAssignmentOperator(LValue Dst, LValue Src
) {
bool IsVolatile = Dst.isVolatile() || Src.isVolatile();
- Address DstPtr = Dst.getAddress(*this), SrcPtr = Src.getAddress(*this);
+ Address DstPtr = Dst.getAddress(), SrcPtr = Src.getAddress();
QualType QT = Dst.getType();
GenBinaryFuncName<true> GenName("__move_assignment_", DstPtr.getAlignment(),
SrcPtr.getAlignment(), getContext());
diff --git a/clang/lib/CodeGen/CGObjC.cpp b/clang/lib/CodeGen/CGObjC.cpp
index ee571995ce4c..281b2d9795f6 100644
--- a/clang/lib/CodeGen/CGObjC.cpp
+++ b/clang/lib/CodeGen/CGObjC.cpp
@@ -586,7 +586,7 @@ RValue CodeGenFunction::EmitObjCMessageExpr(const ObjCMessageExpr *E,
method->getMethodFamily() == OMF_retain) {
if (auto lvalueExpr = findWeakLValue(E->getInstanceReceiver())) {
LValue lvalue = EmitLValue(lvalueExpr);
- llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress(*this));
+ llvm::Value *result = EmitARCLoadWeakRetained(lvalue.getAddress());
return AdjustObjCObjectType(*this, E->getType(), RValue::get(result));
}
}
@@ -1189,7 +1189,7 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
llvm::Type *bitcastType = llvm::Type::getIntNTy(getLLVMContext(), ivarSize);
// Perform an atomic load. This does not impose ordering constraints.
- Address ivarAddr = LV.getAddress(*this);
+ Address ivarAddr = LV.getAddress();
ivarAddr = ivarAddr.withElementType(bitcastType);
llvm::LoadInst *load = Builder.CreateLoad(ivarAddr, "load");
load->setAtomic(llvm::AtomicOrdering::Unordered);
@@ -1287,14 +1287,14 @@ CodeGenFunction::generateObjCGetterBody(const ObjCImplementationDecl *classImpl,
case TEK_Scalar: {
llvm::Value *value;
if (propType->isReferenceType()) {
- value = LV.getAddress(*this).emitRawPointer(*this);
+ value = LV.getAddress().emitRawPointer(*this);
} else {
// We want to load and autoreleaseReturnValue ARC __weak ivars.
if (LV.getQuals().getObjCLifetime() == Qualifiers::OCL_Weak) {
if (getLangOpts().ObjCAutoRefCount) {
value = emitARCRetainLoadOfScalar(*this, LV, ivarType);
} else {
- value = EmitARCLoadWeak(LV.getAddress(*this));
+ value = EmitARCLoadWeak(LV.getAddress());
}
// Otherwise we want to do a simple load, suppressing the
@@ -1477,7 +1477,7 @@ CodeGenFunction::generateObjCSetterBody(const ObjCImplementationDecl *classImpl,
LValue ivarLValue =
EmitLValueForIvar(TypeOfSelfObject(), LoadObjCSelf(), ivar, /*quals*/ 0);
- Address ivarAddr = ivarLValue.getAddress(*this);
+ Address ivarAddr = ivarLValue.getAddress();
// Currently, all atomic accesses have to be through integer
// types, so there's no point in trying to pick a prettier type.
@@ -1655,7 +1655,7 @@ namespace {
void Emit(CodeGenFunction &CGF, Flags flags) override {
LValue lvalue
= CGF.EmitLValueForIvar(CGF.TypeOfSelfObject(), addr, ivar, /*CVR*/ 0);
- CGF.emitDestroy(lvalue.getAddress(CGF), ivar->getType(), destroyer,
+ CGF.emitDestroy(lvalue.getAddress(), ivar->getType(), destroyer,
flags.isForNormalCleanup() && useEHCleanupForArray);
}
};
@@ -1722,7 +1722,7 @@ void CodeGenFunction::GenerateObjCCtorDtorMethod(ObjCImplementationDecl *IMP,
LValue LV = EmitLValueForIvar(TypeOfSelfObject(),
LoadObjCSelf(), Ivar, 0);
EmitAggExpr(IvarInit->getInit(),
- AggValueSlot::forLValue(LV, *this, AggValueSlot::IsDestructed,
+ AggValueSlot::forLValue(LV, AggValueSlot::IsDestructed,
AggValueSlot::DoesNotNeedGCBarriers,
AggValueSlot::IsNotAliased,
AggValueSlot::DoesNotOverlap));
@@ -2508,7 +2508,7 @@ llvm::Value *CodeGenFunction::EmitARCStoreStrong(LValue dst,
!isBlock &&
(dst.getAlignment().isZero() ||
dst.getAlignment() >= CharUnits::fromQuantity(PointerAlignInBytes))) {
- return EmitARCStoreStrongCall(dst.getAddress(*this), newValue, ignored);
+ return EmitARCStoreStrongCall(dst.getAddress(), newValue, ignored);
}
// Otherwise, split it out.
@@ -2898,7 +2898,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
result = CGF.EmitLoadOfLValue(lvalue, SourceLocation()).getScalarVal();
} else {
assert(type.getObjCLifetime() == Qualifiers::OCL_Weak);
- result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress(CGF));
+ result = CGF.EmitARCLoadWeakRetained(lvalue.getAddress());
}
return TryEmitResult(result, !shouldRetain);
}
@@ -2922,7 +2922,7 @@ static TryEmitResult tryEmitARCRetainLoadOfScalar(CodeGenFunction &CGF,
SourceLocation()).getScalarVal();
// Set the source pointer to NULL.
- CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress(CGF)), lv);
+ CGF.EmitStoreOfScalar(getNullForVariable(lv.getAddress()), lv);
return TryEmitResult(result, true);
}
diff --git a/clang/lib/CodeGen/CGOpenMPRuntime.cpp b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
index f56af318ff6a..f6d12d46cfc0 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntime.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntime.cpp
@@ -373,7 +373,7 @@ public:
/*RefersToEnclosingVariableOrCapture=*/false,
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
- PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
+ PrivScope.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
}
(void)PrivScope.Privatize();
}
@@ -809,7 +809,7 @@ void ReductionCodeGen::emitAggregateType(CodeGenFunction &CGF, unsigned N) {
}
llvm::Value *Size;
llvm::Value *SizeInChars;
- auto *ElemType = OrigAddresses[N].first.getAddress(CGF).getElementType();
+ auto *ElemType = OrigAddresses[N].first.getAddress().getElementType();
auto *ElemSizeOf = llvm::ConstantExpr::getSizeOf(ElemType);
if (AsArraySection) {
Size = CGF.Builder.CreatePtrDiff(ElemType,
@@ -897,15 +897,15 @@ static LValue loadToBegin(CodeGenFunction &CGF, QualType BaseTy, QualType ElTy,
while ((BaseTy->isPointerType() || BaseTy->isReferenceType()) &&
!CGF.getContext().hasSameType(BaseTy, ElTy)) {
if (const auto *PtrTy = BaseTy->getAs<PointerType>()) {
- BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(CGF), PtrTy);
+ BaseLV = CGF.EmitLoadOfPointerLValue(BaseLV.getAddress(), PtrTy);
} else {
- LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(CGF), BaseTy);
+ LValue RefLVal = CGF.MakeAddrLValue(BaseLV.getAddress(), BaseTy);
BaseLV = CGF.EmitLoadOfReferenceLValue(RefLVal);
}
BaseTy = BaseTy->getPointeeType();
}
return CGF.MakeAddrLValue(
- BaseLV.getAddress(CGF).withElementType(CGF.ConvertTypeForMem(ElTy)),
+ BaseLV.getAddress().withElementType(CGF.ConvertTypeForMem(ElTy)),
BaseLV.getType(), BaseLV.getBaseInfo(),
CGF.CGM.getTBAAInfoForSubobject(BaseLV, BaseLV.getType()));
}
@@ -968,7 +968,7 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
LValue BaseLValue =
loadToBegin(CGF, OrigVD->getType(), SharedAddresses[N].first.getType(),
OriginalBaseLValue);
- Address SharedAddr = SharedAddresses[N].first.getAddress(CGF);
+ Address SharedAddr = SharedAddresses[N].first.getAddress();
llvm::Value *Adjustment = CGF.Builder.CreatePtrDiff(
SharedAddr.getElementType(), BaseLValue.getPointer(CGF),
SharedAddr.emitRawPointer(CGF));
@@ -979,7 +979,7 @@ Address ReductionCodeGen::adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
SharedAddr.getElementType(), PrivatePointer, Adjustment);
return castToBase(CGF, OrigVD->getType(),
SharedAddresses[N].first.getType(),
- OriginalBaseLValue.getAddress(CGF), Ptr);
+ OriginalBaseLValue.getAddress(), Ptr);
}
BaseDecls.emplace_back(
cast<VarDecl>(cast<DeclRefExpr>(ClausesData[N].Ref)->getDecl()));
@@ -1108,11 +1108,11 @@ emitCombinerOrInitializer(CodeGenModule &CGM, QualType Ty,
Address AddrIn = CGF.GetAddrOfLocalVar(&OmpInParm);
Scope.addPrivate(
In, CGF.EmitLoadOfPointerLValue(AddrIn, PtrTy->castAs<PointerType>())
- .getAddress(CGF));
+ .getAddress());
Address AddrOut = CGF.GetAddrOfLocalVar(&OmpOutParm);
Scope.addPrivate(
Out, CGF.EmitLoadOfPointerLValue(AddrOut, PtrTy->castAs<PointerType>())
- .getAddress(CGF));
+ .getAddress());
(void)Scope.Privatize();
if (!IsCombiner && Out->hasInit() &&
!CGF.isTrivialInitializer(Out->getInit())) {
@@ -1946,7 +1946,7 @@ Address CGOpenMPRuntime::emitThreadIDAddress(CodeGenFunction &CGF,
if (auto *OMPRegionInfo =
dyn_cast_or_null<CGOpenMPRegionInfo>(CGF.CapturedStmtInfo))
if (OMPRegionInfo->getThreadIDVariable())
- return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress(CGF);
+ return OMPRegionInfo->getThreadIDVariableLValue(CGF).getAddress();
llvm::Value *ThreadID = getThreadID(CGF, Loc);
QualType Int32Ty =
@@ -3046,7 +3046,7 @@ emitProxyTaskFunction(CodeGenModule &CGM, SourceLocation Loc,
llvm::Value *CommonArgs[] = {
GtidParam, PartidParam, PrivatesParam, TaskPrivatesMap,
CGF.Builder
- .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(CGF),
+ .CreatePointerBitCastOrAddrSpaceCast(TDBase.getAddress(),
CGF.VoidPtrTy, CGF.Int8Ty)
.emitRawPointer(CGF)};
SmallVector<llvm::Value *, 16> CallArgs(std::begin(CommonArgs),
@@ -3125,7 +3125,7 @@ static llvm::Value *emitDestructorsFunction(CodeGenModule &CGM,
if (QualType::DestructionKind DtorKind =
Field->getType().isDestructedType()) {
LValue FieldLValue = CGF.EmitLValueForField(Base, Field);
- CGF.pushDestroy(DtorKind, FieldLValue.getAddress(CGF), Field->getType());
+ CGF.pushDestroy(DtorKind, FieldLValue.getAddress(), Field->getType());
}
}
CGF.FinishFunction();
@@ -3233,7 +3233,7 @@ emitTaskPrivateMappingFunction(CodeGenModule &CGM, SourceLocation Loc,
LValue RefLVal =
CGF.MakeAddrLValue(CGF.GetAddrOfLocalVar(VD), VD->getType());
LValue RefLoadLVal = CGF.EmitLoadOfPointerLValue(
- RefLVal.getAddress(CGF), RefLVal.getType()->castAs<PointerType>());
+ RefLVal.getAddress(), RefLVal.getType()->castAs<PointerType>());
CGF.EmitStoreOfScalar(FieldLVal.getPointer(CGF), RefLoadLVal);
++Counter;
}
@@ -3305,7 +3305,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
} else if (ForDup) {
SharedRefLValue = CGF.EmitLValueForField(SrcBase, SharedField);
SharedRefLValue = CGF.MakeAddrLValue(
- SharedRefLValue.getAddress(CGF).withAlignment(
+ SharedRefLValue.getAddress().withAlignment(
C.getDeclAlign(OriginalVD)),
SharedRefLValue.getType(), LValueBaseInfo(AlignmentSource::Decl),
SharedRefLValue.getTBAAInfo());
@@ -3329,8 +3329,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
// Initialize firstprivate array using element-by-element
// initialization.
CGF.EmitOMPAggregateAssign(
- PrivateLValue.getAddress(CGF), SharedRefLValue.getAddress(CGF),
- Type,
+ PrivateLValue.getAddress(), SharedRefLValue.getAddress(), Type,
[&CGF, Elem, Init, &CapturesInfo](Address DestElement,
Address SrcElement) {
// Clean up any temporaries needed by the initialization.
@@ -3347,7 +3346,7 @@ static void emitPrivatesInit(CodeGenFunction &CGF,
}
} else {
CodeGenFunction::OMPPrivateScope InitScope(CGF);
- InitScope.addPrivate(Elem, SharedRefLValue.getAddress(CGF));
+ InitScope.addPrivate(Elem, SharedRefLValue.getAddress());
(void)InitScope.Privatize();
CodeGenFunction::CGCapturedStmtRAII CapInfoRAII(CGF, &CapturesInfo);
CGF.EmitExprAsInit(Init, VD, PrivateLValue,
@@ -3508,7 +3507,7 @@ public:
HelperData.CounterVD->getType());
// Counter = 0;
CGF.EmitStoreOfScalar(
- llvm::ConstantInt::get(CLVal.getAddress(CGF).getElementType(), 0),
+ llvm::ConstantInt::get(CLVal.getAddress().getElementType(), 0),
CLVal);
CodeGenFunction::JumpDest &ContDest =
ContDests.emplace_back(CGF.getJumpDestInCurrentScope("iter.cont"));
@@ -3572,7 +3571,7 @@ getPointerAndSize(CodeGenFunction &CGF, const Expr *E) {
} else if (const auto *ASE =
dyn_cast<ArraySectionExpr>(E->IgnoreParenImpCasts())) {
LValue UpAddrLVal = CGF.EmitArraySectionExpr(ASE, /*IsLowerBound=*/false);
- Address UpAddrAddress = UpAddrLVal.getAddress(CGF);
+ Address UpAddrAddress = UpAddrLVal.getAddress();
llvm::Value *UpAddr = CGF.Builder.CreateConstGEP1_32(
UpAddrAddress.getElementType(), UpAddrAddress.emitRawPointer(CGF),
/*Idx0=*/1);
@@ -4045,11 +4044,11 @@ CGOpenMPRuntime::getDepobjElements(CodeGenFunction &CGF, LValue DepobjLVal,
cast<RecordDecl>(KmpDependInfoTy->getAsTagDecl());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF).withElementType(
+ DepobjLVal.getAddress().withElementType(
CGF.ConvertTypeForMem(KmpDependInfoPtrTy)),
KmpDependInfoPtrTy->castAs<PointerType>());
Address DepObjAddr = CGF.Builder.CreateGEP(
- CGF, Base.getAddress(CGF),
+ CGF, Base.getAddress(),
llvm::ConstantInt::get(CGF.IntPtrTy, -1, /*isSigned=*/true));
LValue NumDepsBase = CGF.MakeAddrLValue(
DepObjAddr, KmpDependInfoTy, Base.getBaseInfo(), Base.getTBAAInfo());
@@ -4156,7 +4155,7 @@ SmallVector<llvm::Value *, 4> CGOpenMPRuntime::emitDepobjElementsSizes(
CGF.CreateMemTemp(C.getUIntPtrType(), "depobj.size.addr"),
C.getUIntPtrType());
CGF.Builder.CreateStore(llvm::ConstantInt::get(CGF.IntPtrTy, 0),
- NumLVal.getAddress(CGF));
+ NumLVal.getAddress());
llvm::Value *PrevVal = CGF.EmitLoadOfScalar(NumLVal, E->getExprLoc());
llvm::Value *Add = CGF.Builder.CreateNUWAdd(PrevVal, NumDeps);
CGF.EmitStoreOfScalar(Add, NumLVal);
@@ -4198,7 +4197,7 @@ void CGOpenMPRuntime::emitDepobjElements(CodeGenFunction &CGF,
CGF.Builder.CreateIntCast(NumDeps, CGF.SizeTy, /*isSigned=*/false));
llvm::Value *Pos = CGF.EmitLoadOfScalar(PosLVal, E->getExprLoc());
Address DepAddr = CGF.Builder.CreateGEP(CGF, DependenciesArray, Pos);
- CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(CGF), Size);
+ CGF.Builder.CreateMemCpy(DepAddr, Base.getAddress(), Size);
// Increase pos.
// pos += size;
@@ -4425,11 +4424,11 @@ void CGOpenMPRuntime::emitDestroyClause(CodeGenFunction &CGF, LValue DepobjLVal,
ASTContext &C = CGM.getContext();
QualType FlagsTy;
getDependTypes(C, KmpDependInfoTy, FlagsTy);
- LValue Base = CGF.EmitLoadOfPointerLValue(
- DepobjLVal.getAddress(CGF), C.VoidPtrTy.castAs<PointerType>());
+ LValue Base = CGF.EmitLoadOfPointerLValue(DepobjLVal.getAddress(),
+ C.VoidPtrTy.castAs<PointerType>());
QualType KmpDependInfoPtrTy = C.getPointerType(KmpDependInfoTy);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- Base.getAddress(CGF), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
+ Base.getAddress(), CGF.ConvertTypeForMem(KmpDependInfoPtrTy),
CGF.ConvertTypeForMem(KmpDependInfoTy));
llvm::Value *DepObjAddr = CGF.Builder.CreateGEP(
Addr.getElementType(), Addr.emitRawPointer(CGF),
@@ -4460,7 +4459,7 @@ void CGOpenMPRuntime::emitUpdateClause(CodeGenFunction &CGF, LValue DepobjLVal,
LValue Base;
std::tie(NumDeps, Base) = getDepobjElements(CGF, DepobjLVal, Loc);
- Address Begin = Base.getAddress(CGF);
+ Address Begin = Base.getAddress();
// Cast from pointer to array type to pointer to single element.
llvm::Value *End = CGF.Builder.CreateGEP(Begin.getElementType(),
Begin.emitRawPointer(CGF), NumDeps);
@@ -4646,24 +4645,21 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTLowerBound));
const auto *LBVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getLowerBoundVariable())->getDecl());
- CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(CGF),
- LBLVal.getQuals(),
+ CGF.EmitAnyExprToMem(LBVar->getInit(), LBLVal.getAddress(), LBLVal.getQuals(),
/*IsInitializer=*/true);
LValue UBLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTUpperBound));
const auto *UBVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getUpperBoundVariable())->getDecl());
- CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(CGF),
- UBLVal.getQuals(),
+ CGF.EmitAnyExprToMem(UBVar->getInit(), UBLVal.getAddress(), UBLVal.getQuals(),
/*IsInitializer=*/true);
LValue StLVal = CGF.EmitLValueForField(
Result.TDBase,
*std::next(Result.KmpTaskTQTyRD->field_begin(), KmpTaskTStride));
const auto *StVar =
cast<VarDecl>(cast<DeclRefExpr>(D.getStrideVariable())->getDecl());
- CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(CGF),
- StLVal.getQuals(),
+ CGF.EmitAnyExprToMem(StVar->getInit(), StLVal.getAddress(), StLVal.getQuals(),
/*IsInitializer=*/true);
// Store reductions address.
LValue RedLVal = CGF.EmitLValueForField(
@@ -4672,7 +4668,7 @@ void CGOpenMPRuntime::emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
if (Data.Reductions) {
CGF.EmitStoreOfScalar(Data.Reductions, RedLVal);
} else {
- CGF.EmitNullInitialization(RedLVal.getAddress(CGF),
+ CGF.EmitNullInitialization(RedLVal.getAddress(),
CGF.getContext().VoidPtrTy);
}
enum { NoSchedule = 0, Grainsize = 1, NumTasks = 2 };
@@ -5522,8 +5518,7 @@ llvm::Value *CGOpenMPRuntime::emitTaskReductionInit(
llvm::ConstantInt::get(CGM.Int32Ty, /*V=*/1, /*isSigned=*/true),
FlagsLVal);
} else
- CGF.EmitNullInitialization(FlagsLVal.getAddress(CGF),
- FlagsLVal.getType());
+ CGF.EmitNullInitialization(FlagsLVal.getAddress(), FlagsLVal.getType());
}
if (Data.IsReductionWithTaskMod) {
// Build call void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int
@@ -5850,7 +5845,7 @@ void CGOpenMPRuntime::emitUsesAllocatorsInit(CodeGenFunction &CGF,
.getLimitedValue());
LValue AllocatorTraitsLVal = CGF.EmitLValue(AllocatorTraits);
Address Addr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- AllocatorTraitsLVal.getAddress(CGF), CGF.VoidPtrPtrTy, CGF.VoidPtrTy);
+ AllocatorTraitsLVal.getAddress(), CGF.VoidPtrPtrTy, CGF.VoidPtrTy);
AllocatorTraitsLVal = CGF.MakeAddrLValue(Addr, CGF.getContext().VoidPtrTy,
AllocatorTraitsLVal.getBaseInfo(),
AllocatorTraitsLVal.getTBAAInfo());
@@ -7043,7 +7038,7 @@ private:
} else if ((AE && isa<CXXThisExpr>(AE->getBase()->IgnoreParenImpCasts())) ||
(OASE &&
isa<CXXThisExpr>(OASE->getBase()->IgnoreParenImpCasts()))) {
- BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
+ BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress();
} else if (OAShE &&
isa<CXXThisExpr>(OAShE->getBase()->IgnoreParenCasts())) {
BP = Address(
@@ -7053,7 +7048,7 @@ private:
} else {
// The base is the reference to the variable.
// BP = &Var.
- BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress(CGF);
+ BP = CGF.EmitOMPSharedLValue(AssocExpr).getAddress();
if (const auto *VD =
dyn_cast_or_null<VarDecl>(I->getAssociatedDeclaration())) {
if (std::optional<OMPDeclareTargetDeclAttr::MapTypeTy> Res =
@@ -7252,13 +7247,13 @@ private:
LValue BaseLVal = EmitMemberExprBase(CGF, ME);
LowestElem = CGF.EmitLValueForFieldInitialization(
BaseLVal, cast<FieldDecl>(MapDecl))
- .getAddress(CGF);
+ .getAddress();
LB = CGF.EmitLoadOfReferenceLValue(LowestElem, MapDecl->getType())
- .getAddress(CGF);
+ .getAddress();
} else {
LowestElem = LB =
CGF.EmitOMPSharedLValue(I->getAssociatedExpression())
- .getAddress(CGF);
+ .getAddress();
}
// If this component is a pointer inside the base struct then we don't
@@ -7316,11 +7311,11 @@ private:
LValue BaseLVal = EmitMemberExprBase(CGF, ME);
ComponentLB =
CGF.EmitLValueForFieldInitialization(BaseLVal, FD)
- .getAddress(CGF);
+ .getAddress();
} else {
ComponentLB =
CGF.EmitOMPSharedLValue(MC.getAssociatedExpression())
- .getAddress(CGF);
+ .getAddress();
}
llvm::Value *ComponentLBPtr = ComponentLB.emitRawPointer(CGF);
llvm::Value *LBPtr = LB.emitRawPointer(CGF);
@@ -7449,7 +7444,7 @@ private:
if (IsFinalArraySection) {
Address HB =
CGF.EmitArraySectionExpr(OASE, /*IsLowerBound=*/false)
- .getAddress(CGF);
+ .getAddress();
PartialStruct.HighestElem = {FieldIndex, HB};
} else {
PartialStruct.HighestElem = {FieldIndex, LowestElem};
@@ -7462,7 +7457,7 @@ private:
if (IsFinalArraySection) {
Address HB =
CGF.EmitArraySectionExpr(OASE, /*IsLowerBound=*/false)
- .getAddress(CGF);
+ .getAddress();
PartialStruct.HighestElem = {FieldIndex, HB};
} else {
PartialStruct.HighestElem = {FieldIndex, LowestElem};
@@ -11634,7 +11629,7 @@ Address CGOpenMPRuntime::emitLastprivateConditionalInit(CodeGenFunction &CGF,
CGF.EmitStoreOfScalar(
llvm::ConstantInt::getNullValue(CGF.ConvertTypeForMem(C.CharTy)),
FiredLVal);
- return CGF.EmitLValueForField(BaseLVal, VDField).getAddress(CGF);
+ return CGF.EmitLValueForField(BaseLVal, VDField).getAddress();
}
namespace {
@@ -11820,7 +11815,7 @@ void CGOpenMPRuntime::checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const FieldDecl* FiredDecl = std::get<2>(It->getSecond());
LValue PrivLVal = CGF.EmitLValue(FoundE);
Address StructAddr = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(
- PrivLVal.getAddress(CGF),
+ PrivLVal.getAddress(),
CGF.ConvertTypeForMem(CGF.getContext().getPointerType(StructTy)),
CGF.ConvertTypeForMem(StructTy));
LValue BaseLVal =
diff --git a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
index 87496c8e488c..28da8662f5f6 100644
--- a/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
+++ b/clang/lib/CodeGen/CGOpenMPRuntimeGPU.cpp
@@ -1103,13 +1103,13 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
VoidPtr, VarPtrTy, VD->getName() + "_on_stack");
LValue VarAddr =
CGF.MakeNaturalAlignPointeeRawAddrLValue(CastedVoidPtr, VarTy);
- Rec.second.PrivateAddr = VarAddr.getAddress(CGF);
+ Rec.second.PrivateAddr = VarAddr.getAddress();
Rec.second.GlobalizedVal = VoidPtr;
// Assign the local allocation to the newly globalized location.
if (EscapedParam) {
CGF.EmitStoreOfScalar(ParValue, VarAddr);
- I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress(CGF));
+ I->getSecond().MappedParams->setVarAddr(CGF, VD, VarAddr.getAddress());
}
if (auto *DI = CGF.getDebugInfo())
VoidPtr->setDebugLoc(DI->SourceLocToDebugLoc(VD->getLocation()));
@@ -1123,7 +1123,7 @@ void CGOpenMPRuntimeGPU::emitGenericVarsProlog(CodeGenFunction &CGF,
LValue Base = CGF.MakeAddrLValue(AddrSizePair.first, VD->getType(),
CGM.getContext().getDeclAlign(VD),
AlignmentSource::Decl);
- I->getSecond().MappedParams->setVarAddr(CGF, VD, Base.getAddress(CGF));
+ I->getSecond().MappedParams->setVarAddr(CGF, VD, Base.getAddress());
}
I->getSecond().MappedParams->apply(CGF);
}
@@ -2226,7 +2226,7 @@ static llvm::Value *emitListToGlobalCopyFunction(
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
+ Address GlobAddr = GlobLVal.getAddress();
GlobLVal.setAddress(Address(GlobAddr.emitRawPointer(CGF),
CGF.ConvertTypeForMem(Private->getType()),
GlobAddr.getAlignment()));
@@ -2327,7 +2327,7 @@ static llvm::Value *emitListToGlobalReduceFunction(
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
+ Address GlobAddr = GlobLVal.getAddress();
CGF.EmitStoreOfScalar(GlobAddr.emitRawPointer(CGF), Elem,
/*Volatile=*/false, C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
@@ -2433,7 +2433,7 @@ static llvm::Value *emitGlobalToListCopyFunction(
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
+ Address GlobAddr = GlobLVal.getAddress();
GlobLVal.setAddress(Address(GlobAddr.emitRawPointer(CGF),
CGF.ConvertTypeForMem(Private->getType()),
GlobAddr.getAlignment()));
@@ -2534,7 +2534,7 @@ static llvm::Value *emitGlobalToListReduceFunction(
Bld.CreateInBoundsGEP(LLVMReductionsBufferTy, BufferArrPtr, Idxs);
LValue GlobLVal = CGF.EmitLValueForField(
CGF.MakeNaturalAlignRawAddrLValue(BufferPtr, StaticTy), FD);
- Address GlobAddr = GlobLVal.getAddress(CGF);
+ Address GlobAddr = GlobLVal.getAddress();
CGF.EmitStoreOfScalar(GlobAddr.emitRawPointer(CGF), Elem,
/*Volatile=*/false, C.VoidPtrTy);
if ((*IPriv)->getType()->isVariablyModifiedType()) {
@@ -3406,7 +3406,7 @@ void CGOpenMPRuntimeGPU::adjustTargetSpecificDataForLambdas(
if (VD->getType().getCanonicalType()->isReferenceType())
VDAddr = CGF.EmitLoadOfReferenceLValue(VDAddr,
VD->getType().getCanonicalType())
- .getAddress(CGF);
+ .getAddress();
CGF.EmitStoreOfScalar(VDAddr.emitRawPointer(CGF), VarLVal);
}
}
diff --git a/clang/lib/CodeGen/CGStmt.cpp b/clang/lib/CodeGen/CGStmt.cpp
index 36776846cd44..99daaa14cf3f 100644
--- a/clang/lib/CodeGen/CGStmt.cpp
+++ b/clang/lib/CodeGen/CGStmt.cpp
@@ -2372,13 +2372,12 @@ std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
Ty = llvm::IntegerType::get(getLLVMContext(), Size);
- return {
- Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
- nullptr};
+ return {Builder.CreateLoad(InputValue.getAddress().withElementType(Ty)),
+ nullptr};
}
}
- Address Addr = InputValue.getAddress(*this);
+ Address Addr = InputValue.getAddress();
ConstraintStr += '*';
return {InputValue.getPointer(*this), Addr.getElementType()};
}
@@ -2574,7 +2573,7 @@ EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
// ResultTypeRequiresCast.size() elements of RegResults.
if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
- Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
+ Address A = Dest.getAddress().withElementType(ResultRegTypes[i]);
if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
Builder.CreateStore(Tmp, A);
continue;
@@ -2776,7 +2775,7 @@ void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
std::max((uint64_t)LargestVectorWidth,
VT->getPrimitiveSizeInBits().getKnownMinValue());
} else {
- Address DestAddr = Dest.getAddress(*this);
+ Address DestAddr = Dest.getAddress();
// Matrix types in memory are represented by arrays, but accessed through
// vector pointers, with the alignment specified on the access operation.
// For inline assembly, update pointer arguments to use vector pointers.
@@ -3124,7 +3123,7 @@ CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
LValue CapStruct = InitCapturedStruct(S);
- return CapStruct.getAddress(*this);
+ return CapStruct.getAddress();
}
/// Creates the outlined function for a CapturedStmt.
diff --git a/clang/lib/CodeGen/CGStmtOpenMP.cpp b/clang/lib/CodeGen/CGStmtOpenMP.cpp
index ef3aa3a8e0dc..eac5ef326293 100644
--- a/clang/lib/CodeGen/CGStmtOpenMP.cpp
+++ b/clang/lib/CodeGen/CGStmtOpenMP.cpp
@@ -100,7 +100,7 @@ public:
isCapturedVar(CGF, VD) || (CGF.CapturedStmtInfo &&
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue, C.getLocation());
- InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
+ InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
}
}
(void)InlinedShareds.Privatize();
@@ -276,7 +276,7 @@ public:
InlinedShareds.isGlobalVarCaptured(VD)),
VD->getType().getNonReferenceType(), VK_LValue,
C.getLocation());
- InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress(CGF));
+ InlinedShareds.addPrivate(VD, CGF.EmitLValue(&DRE).getAddress());
}
}
CS = dyn_cast<CapturedStmt>(CS->getCapturedStmt());
@@ -369,8 +369,7 @@ void CodeGenFunction::GenerateOpenMPCapturedVars(
CapturedVars.push_back(CV);
} else {
assert(CurCap->capturesVariable() && "Expected capture by reference.");
- CapturedVars.push_back(
- EmitLValue(*I).getAddress(*this).emitRawPointer(*this));
+ CapturedVars.push_back(EmitLValue(*I).getAddress().emitRawPointer(*this));
}
}
}
@@ -381,11 +380,11 @@ static Address castValueFromUintptr(CodeGenFunction &CGF, SourceLocation Loc,
ASTContext &Ctx = CGF.getContext();
llvm::Value *CastedPtr = CGF.EmitScalarConversion(
- AddrLV.getAddress(CGF).emitRawPointer(CGF), Ctx.getUIntPtrType(),
+ AddrLV.getAddress().emitRawPointer(CGF), Ctx.getUIntPtrType(),
Ctx.getPointerType(DstType), Loc);
// FIXME: should the pointee type (DstType) be passed?
Address TmpAddr =
- CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress(CGF);
+ CGF.MakeNaturalAlignAddrLValue(CastedPtr, DstType).getAddress();
return TmpAddr;
}
@@ -578,7 +577,7 @@ static llvm::Function *emitOutlinedFunctionPrologue(
} else if (I->capturesVariable()) {
const VarDecl *Var = I->getCapturedVar();
QualType VarTy = Var->getType();
- Address ArgAddr = ArgLVal.getAddress(CGF);
+ Address ArgAddr = ArgLVal.getAddress();
if (ArgLVal.getType()->isLValueReferenceType()) {
ArgAddr = CGF.EmitLoadOfReference(ArgLVal);
} else if (!VarTy->isVariablyModifiedType() || !VarTy->isPointerType()) {
@@ -599,12 +598,12 @@ static llvm::Function *emitOutlinedFunctionPrologue(
? castValueFromUintptr(
CGF, I->getLocation(), FD->getType(),
Args[Cnt]->getName(), ArgLVal)
- : ArgLVal.getAddress(CGF)}});
+ : ArgLVal.getAddress()}});
} else {
// If 'this' is captured, load it into CXXThisValue.
assert(I->capturesThis());
CXXThisValue = CGF.EmitLoadOfScalar(ArgLVal, I->getLocation());
- LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress(CGF)}});
+ LocalAddrs.insert({Args[Cnt], {nullptr, ArgLVal.getAddress()}});
}
++Cnt;
++I;
@@ -674,7 +673,7 @@ CodeGenFunction::GenerateOpenMPCapturedStmtFunction(const CapturedStmt &S,
I->second.first ? I->second.first->getType() : Arg->getType(),
AlignmentSource::Decl);
if (LV.getType()->isAnyComplexType())
- LV.setAddress(LV.getAddress(WrapperCGF).withElementType(PI->getType()));
+ LV.setAddress(LV.getAddress().withElementType(PI->getType()));
CallArg = WrapperCGF.EmitLoadOfScalar(LV, S.getBeginLoc());
} else {
auto EI = VLASizes.find(Arg);
@@ -890,8 +889,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
EmitAggregateAssign(Dest, OriginalLVal, Type);
} else {
EmitOMPAggregateAssign(
- Emission.getAllocatedAddress(), OriginalLVal.getAddress(*this),
- Type,
+ Emission.getAllocatedAddress(), OriginalLVal.getAddress(), Type,
[this, VDInit, Init](Address DestElement, Address SrcElement) {
// Clean up any temporaries needed by the
// initialization.
@@ -908,7 +906,7 @@ bool CodeGenFunction::EmitOMPFirstprivateClause(const OMPExecutableDirective &D,
IsRegistered =
PrivateScope.addPrivate(OrigVD, Emission.getAllocatedAddress());
} else {
- Address OriginalAddr = OriginalLVal.getAddress(*this);
+ Address OriginalAddr = OriginalLVal.getAddress();
// Emit private VarDecl with copy init.
// Remap temp VDInit variable to the address of the original
// variable (for proper handling of captured global variables).
@@ -997,7 +995,7 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
"Copyin threadprivates should have been captured!");
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD), true,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- MasterAddr = EmitLValue(&DRE).getAddress(*this);
+ MasterAddr = EmitLValue(&DRE).getAddress();
LocalDeclMap.erase(VD);
} else {
MasterAddr =
@@ -1007,7 +1005,7 @@ bool CodeGenFunction::EmitOMPCopyinClause(const OMPExecutableDirective &D) {
getContext().getDeclAlign(VD));
}
// Get the address of the threadprivate variable.
- Address PrivateAddr = EmitLValue(*IRef).getAddress(*this);
+ Address PrivateAddr = EmitLValue(*IRef).getAddress();
if (CopiedVars.size() == 1) {
// At first check if current thread is a master thread. If it is, no
// need to copy data.
@@ -1076,7 +1074,7 @@ bool CodeGenFunction::EmitOMPLastprivateClauseInit(
/*RefersToEnclosingVariableOrCapture=*/
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IRef)->getType(), VK_LValue, (*IRef)->getExprLoc());
- PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress(*this));
+ PrivateScope.addPrivate(DestVD, EmitLValue(&DRE).getAddress());
// Check if the variable is also a firstprivate: in this case IInit is
// not generated. Initialization of this variable will happen in codegen
// for 'firstprivate' clause.
@@ -1239,7 +1237,7 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
RedCG.emitAggregateType(*this, Count);
AutoVarEmission Emission = EmitAutoVarAlloca(*PrivateVD);
RedCG.emitInitialization(*this, Count, Emission.getAllocatedAddress(),
- RedCG.getSharedLValue(Count).getAddress(*this),
+ RedCG.getSharedLValue(Count).getAddress(),
[&Emission](CodeGenFunction &CGF) {
CGF.EmitAutoVarInit(Emission);
return true;
@@ -1260,22 +1258,20 @@ void CodeGenFunction::EmitOMPReductionClauseInit(
if (isaOMPArraySectionExpr && Type->isVariablyModifiedType()) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD,
- RedCG.getSharedLValue(Count).getAddress(*this));
+ PrivateScope.addPrivate(LHSVD, RedCG.getSharedLValue(Count).getAddress());
PrivateScope.addPrivate(RHSVD, GetAddrOfLocalVar(PrivateVD));
} else if ((isaOMPArraySectionExpr && Type->isScalarType()) ||
isa<ArraySubscriptExpr>(IRef)) {
// Store the address of the original variable associated with the LHS
// implicit variable.
- PrivateScope.addPrivate(LHSVD,
- RedCG.getSharedLValue(Count).getAddress(*this));
+ PrivateScope.addPrivate(LHSVD, RedCG.getSharedLValue(Count).getAddress());
PrivateScope.addPrivate(RHSVD,
GetAddrOfLocalVar(PrivateVD).withElementType(
ConvertTypeForMem(RHSVD->getType())));
} else {
QualType Type = PrivateVD->getType();
bool IsArray = getContext().getAsArrayType(Type) != nullptr;
- Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress(*this);
+ Address OriginalAddr = RedCG.getSharedLValue(Count).getAddress();
// Store the address of the original variable associated with the LHS
// implicit variable.
if (IsArray) {
@@ -2069,7 +2065,7 @@ void CodeGenFunction::EmitOMPCanonicalLoop(const OMPCanonicalLoop *S) {
// variable and emit the body.
const DeclRefExpr *LoopVarRef = S->getLoopVarRef();
LValue LCVal = EmitLValue(LoopVarRef);
- Address LoopVarAddress = LCVal.getAddress(*this);
+ Address LoopVarAddress = LCVal.getAddress();
emitCapturedStmtCall(*this, LoopVarClosure,
{LoopVarAddress.emitRawPointer(*this), IndVar});
@@ -2210,7 +2206,7 @@ void CodeGenFunction::EmitOMPLinearClauseFinal(
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(OrigVD),
CapturedStmtInfo->lookup(OrigVD) != nullptr,
(*IC)->getType(), VK_LValue, (*IC)->getExprLoc());
- Address OrigAddr = EmitLValue(&DRE).getAddress(*this);
+ Address OrigAddr = EmitLValue(&DRE).getAddress();
CodeGenFunction::OMPPrivateScope VarScope(*this);
VarScope.addPrivate(OrigVD, OrigAddr);
(void)VarScope.Privatize();
@@ -2277,7 +2273,7 @@ void CodeGenFunction::EmitOMPPrivateLoopCounters(
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(VD),
LocalDeclMap.count(VD) || CapturedStmtInfo->lookup(VD),
E->getType(), VK_LValue, E->getExprLoc());
- (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress(*this));
+ (void)LoopScope.addPrivate(PrivateVD, EmitLValue(&DRE).getAddress());
} else {
(void)LoopScope.addPrivate(PrivateVD, VarEmission.getAllocatedAddress());
}
@@ -2443,13 +2439,12 @@ void CodeGenFunction::EmitOMPSimdFinal(
}
Address OrigAddr = Address::invalid();
if (CED) {
- OrigAddr =
- EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress(*this);
+ OrigAddr = EmitLValue(CED->getInit()->IgnoreImpCasts()).getAddress();
} else {
DeclRefExpr DRE(getContext(), const_cast<VarDecl *>(PrivateVD),
/*RefersToEnclosingVariableOrCapture=*/false,
(*IPC)->getType(), VK_LValue, (*IPC)->getExprLoc());
- OrigAddr = EmitLValue(&DRE).getAddress(*this);
+ OrigAddr = EmitLValue(&DRE).getAddress();
}
OMPPrivateScope VarScope(*this);
VarScope.addPrivate(OrigVD, OrigAddr);
@@ -3165,16 +3160,14 @@ static void emitDistributeParallelForDistributeInnerBoundParams(
const auto &Dir = cast<OMPLoopDirective>(S);
LValue LB =
CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedLowerBoundVariable()));
- llvm::Value *LBCast =
- CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(LB.getAddress(CGF)),
- CGF.SizeTy, /*isSigned=*/false);
+ llvm::Value *LBCast = CGF.Builder.CreateIntCast(
+ CGF.Builder.CreateLoad(LB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
CapturedVars.push_back(LBCast);
LValue UB =
CGF.EmitLValue(cast<DeclRefExpr>(Dir.getCombinedUpperBoundVariable()));
- llvm::Value *UBCast =
- CGF.Builder.CreateIntCast(CGF.Builder.CreateLoad(UB.getAddress(CGF)),
- CGF.SizeTy, /*isSigned=*/false);
+ llvm::Value *UBCast = CGF.Builder.CreateIntCast(
+ CGF.Builder.CreateLoad(UB.getAddress()), CGF.SizeTy, /*isSigned=*/false);
CapturedVars.push_back(UBCast);
}
@@ -3426,8 +3419,8 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
// one chunk is distributed to each thread. Note that the size of
// the chunks is unspecified in this case.
CGOpenMPRuntime::StaticRTInput StaticInit(
- IVSize, IVSigned, Ordered, IL.getAddress(CGF),
- LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF),
+ IVSize, IVSigned, Ordered, IL.getAddress(), LB.getAddress(),
+ UB.getAddress(), ST.getAddress(),
StaticChunkedOne ? Chunk : nullptr);
CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind,
@@ -3470,9 +3463,9 @@ bool CodeGenFunction::EmitOMPWorksharingLoop(
} else {
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
- OMPLoopArguments LoopArguments(
- LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
- IL.getAddress(*this), Chunk, EUB);
+ OMPLoopArguments LoopArguments(LB.getAddress(), UB.getAddress(),
+ ST.getAddress(), IL.getAddress(), Chunk,
+ EUB);
LoopArguments.DKind = OMPD_for;
EmitOMPForOuterLoop(ScheduleKind, IsMonotonic, S, LoopScope, Ordered,
LoopArguments, CGDispatchBounds);
@@ -3639,11 +3632,10 @@ static void emitScanBasedDirectiveFinals(
RValue::get(OMPLast));
LValue DestLVal = CGF.EmitLValue(OrigExpr);
LValue SrcLVal = CGF.EmitLValue(CopyArrayElem);
- CGF.EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(CGF),
- SrcLVal.getAddress(CGF),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ CGF.EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
}
@@ -3753,7 +3745,7 @@ static void emitScanBasedDirective(
cast<OpaqueValueExpr>(
cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
RValue::get(IVal));
- LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ LHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress();
}
PrivScope.addPrivate(LHSVD, LHSAddr);
Address RHSAddr = Address::invalid();
@@ -3764,7 +3756,7 @@ static void emitScanBasedDirective(
cast<OpaqueValueExpr>(
cast<ArraySubscriptExpr>(CopyArrayElem)->getIdx()),
RValue::get(OffsetIVal));
- RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress(CGF);
+ RHSAddr = CGF.EmitLValue(CopyArrayElem).getAddress();
}
PrivScope.addPrivate(RHSVD, RHSAddr);
++ILHS;
@@ -4078,8 +4070,8 @@ void CodeGenFunction::EmitSections(const OMPExecutableDirective &S) {
OpenMPScheduleTy ScheduleKind;
ScheduleKind.Schedule = OMPC_SCHEDULE_static;
CGOpenMPRuntime::StaticRTInput StaticInit(
- /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(CGF),
- LB.getAddress(CGF), UB.getAddress(CGF), ST.getAddress(CGF));
+ /*IVSize=*/32, /*IVSigned=*/true, /*Ordered=*/false, IL.getAddress(),
+ LB.getAddress(), UB.getAddress(), ST.getAddress());
CGF.CGM.getOpenMPRuntime().emitForStaticInit(
CGF, S.getBeginLoc(), S.getDirectiveKind(), ScheduleKind, StaticInit);
// UB = min(UB, GlobalUB);
@@ -4858,7 +4850,7 @@ void CodeGenFunction::EmitOMPTaskBasedDirective(
CGF.CapturedStmtInfo->lookup(OrigVD) != nullptr,
Pair.second->getType(), VK_LValue,
Pair.second->getExprLoc());
- Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress(CGF));
+ Scope.addPrivate(Pair.first, CGF.EmitLValue(&DRE).getAddress());
}
for (const auto &Pair : PrivatePtrs) {
Address Replacement = Address(
@@ -5505,8 +5497,8 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
*cast<VarDecl>(cast<DeclRefExpr>(TempExpr)->getDecl()));
LValue DestLVal = EmitLValue(TempExpr);
LValue SrcLVal = EmitLValue(LHSs[I]);
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
+ EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(),
+ SrcLVal.getAddress(),
cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
CopyOps[I]);
@@ -5527,11 +5519,10 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
DestLVal = EmitLValue(RHSs[I]);
SrcLVal = EmitLValue(TempExpr);
}
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
}
EmitBranch(IsInclusive ? OMPAfterScanBlock : OMPBeforeScanBlock);
@@ -5564,11 +5555,10 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
RValue::get(IdxVal));
LValue DestLVal = EmitLValue(CopyArrayElem);
LValue SrcLVal = EmitLValue(OrigExpr);
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
}
EmitBranch(BreakContinueStack.back().ContinueBlock.getBlock());
@@ -5606,11 +5596,10 @@ void CodeGenFunction::EmitOMPScanDirective(const OMPScanDirective &S) {
RValue::get(IdxVal));
LValue SrcLVal = EmitLValue(CopyArrayElem);
LValue DestLVal = EmitLValue(OrigExpr);
- EmitOMPCopy(PrivateExpr->getType(), DestLVal.getAddress(*this),
- SrcLVal.getAddress(*this),
- cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
- cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()),
- CopyOps[I]);
+ EmitOMPCopy(
+ PrivateExpr->getType(), DestLVal.getAddress(), SrcLVal.getAddress(),
+ cast<VarDecl>(cast<DeclRefExpr>(LHSs[I])->getDecl()),
+ cast<VarDecl>(cast<DeclRefExpr>(RHSs[I])->getDecl()), CopyOps[I]);
}
if (!IsInclusive) {
EmitBlock(ExclusiveExitBB);
@@ -5735,8 +5724,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
/* Chunked */ Chunk != nullptr) ||
StaticChunked) {
CGOpenMPRuntime::StaticRTInput StaticInit(
- IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(*this),
- LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
+ IVSize, IVSigned, /* Ordered = */ false, IL.getAddress(),
+ LB.getAddress(), UB.getAddress(), ST.getAddress(),
StaticChunked ? Chunk : nullptr);
RT.emitDistributeStaticInit(*this, S.getBeginLoc(), ScheduleKind,
StaticInit);
@@ -5812,8 +5801,8 @@ void CodeGenFunction::EmitOMPDistributeLoop(const OMPLoopDirective &S,
// Emit the outer loop, which requests its work chunk [LB..UB] from
// runtime and runs the inner loop to process it.
const OMPLoopArguments LoopArguments = {
- LB.getAddress(*this), UB.getAddress(*this), ST.getAddress(*this),
- IL.getAddress(*this), Chunk};
+ LB.getAddress(), UB.getAddress(), ST.getAddress(), IL.getAddress(),
+ Chunk};
EmitOMPDistributeOuterLoop(ScheduleKind, S, LoopScope, LoopArguments,
CodeGenLoop);
}
@@ -6127,8 +6116,7 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
// target platform.
if (BO == BO_Comma || !Update.isScalar() || !X.isSimple() ||
(!isa<llvm::ConstantInt>(Update.getScalarVal()) &&
- (Update.getScalarVal()->getType() !=
- X.getAddress(CGF).getElementType())) ||
+ (Update.getScalarVal()->getType() != X.getAddress().getElementType())) ||
!Context.getTargetInfo().hasBuiltinAtomic(
Context.getTypeSize(X.getType()), Context.toBits(X.getAlignment())))
return std::make_pair(false, RValue::get(nullptr));
@@ -6144,10 +6132,10 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
};
if (!CheckAtomicSupport(Update.getScalarVal()->getType(), BO) ||
- !CheckAtomicSupport(X.getAddress(CGF).getElementType(), BO))
+ !CheckAtomicSupport(X.getAddress().getElementType(), BO))
return std::make_pair(false, RValue::get(nullptr));
- bool IsInteger = X.getAddress(CGF).getElementType()->isIntegerTy();
+ bool IsInteger = X.getAddress().getElementType()->isIntegerTy();
llvm::AtomicRMWInst::BinOp RMWOp;
switch (BO) {
case BO_Add:
@@ -6224,14 +6212,14 @@ static std::pair<bool, RValue> emitOMPAtomicRMW(CodeGenFunction &CGF, LValue X,
if (auto *IC = dyn_cast<llvm::ConstantInt>(UpdateVal)) {
if (IsInteger)
UpdateVal = CGF.Builder.CreateIntCast(
- IC, X.getAddress(CGF).getElementType(),
+ IC, X.getAddress().getElementType(),
X.getType()->hasSignedIntegerRepresentation());
else
UpdateVal = CGF.Builder.CreateCast(llvm::Instruction::CastOps::UIToFP, IC,
- X.getAddress(CGF).getElementType());
+ X.getAddress().getElementType());
}
llvm::Value *Res =
- CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(CGF), UpdateVal, AO);
+ CGF.Builder.CreateAtomicRMW(RMWOp, X.getAddress(), UpdateVal, AO);
return std::make_pair(true, RValue::get(Res));
}
@@ -6456,7 +6444,7 @@ static void emitOMPAtomicCompareExpr(
}
LValue XLVal = CGF.EmitLValue(X);
- Address XAddr = XLVal.getAddress(CGF);
+ Address XAddr = XLVal.getAddress();
auto EmitRValueWithCastIfNeeded = [&CGF, Loc](const Expr *X, const Expr *E) {
if (X->getType() == E->getType())
@@ -6472,12 +6460,12 @@ static void emitOMPAtomicCompareExpr(
llvm::Value *DVal = D ? EmitRValueWithCastIfNeeded(X, D) : nullptr;
if (auto *CI = dyn_cast<llvm::ConstantInt>(EVal))
EVal = CGF.Builder.CreateIntCast(
- CI, XLVal.getAddress(CGF).getElementType(),
+ CI, XLVal.getAddress().getElementType(),
E->getType()->hasSignedIntegerRepresentation());
if (DVal)
if (auto *CI = dyn_cast<llvm::ConstantInt>(DVal))
DVal = CGF.Builder.CreateIntCast(
- CI, XLVal.getAddress(CGF).getElementType(),
+ CI, XLVal.getAddress().getElementType(),
D->getType()->hasSignedIntegerRepresentation());
llvm::OpenMPIRBuilder::AtomicOpValue XOpVal{
@@ -6487,14 +6475,14 @@ static void emitOMPAtomicCompareExpr(
llvm::OpenMPIRBuilder::AtomicOpValue VOpVal, ROpVal;
if (V) {
LValue LV = CGF.EmitLValue(V);
- Address Addr = LV.getAddress(CGF);
+ Address Addr = LV.getAddress();
VOpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(),
V->getType()->hasSignedIntegerRepresentation(),
V->getType().isVolatileQualified()};
}
if (R) {
LValue LV = CGF.EmitLValue(R);
- Address Addr = LV.getAddress(CGF);
+ Address Addr = LV.getAddress();
ROpVal = {Addr.emitRawPointer(CGF), Addr.getElementType(),
R->getType()->hasSignedIntegerRepresentation(),
R->getType().isVolatileQualified()};
@@ -8127,7 +8115,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
continue;
if (!CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(Ref);
- GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
+ GlobalsScope.addPrivate(VD, GlobLVal.getAddress());
}
}
}
@@ -8142,7 +8130,7 @@ void CodeGenFunction::EmitSimpleOMPExecutableDirective(
const auto *VD = cast<VarDecl>(cast<DeclRefExpr>(E)->getDecl());
if (!VD->hasLocalStorage() && !CGF.LocalDeclMap.count(VD)) {
LValue GlobLVal = CGF.EmitLValue(E);
- GlobalsScope.addPrivate(VD, GlobLVal.getAddress(CGF));
+ GlobalsScope.addPrivate(VD, GlobLVal.getAddress());
}
if (isa<OMPCapturedExprDecl>(VD)) {
// Emit only those that were not explicitly referenced in clauses.
diff --git a/clang/lib/CodeGen/CGValue.h b/clang/lib/CodeGen/CGValue.h
index cc9ad10ae596..f1ba3cf95ae5 100644
--- a/clang/lib/CodeGen/CGValue.h
+++ b/clang/lib/CodeGen/CGValue.h
@@ -367,10 +367,7 @@ public:
return Addr.isValid() ? Addr.emitRawPointer(CGF) : nullptr;
}
- Address getAddress(CodeGenFunction &CGF) const {
- // FIXME: remove parameter.
- return Addr;
- }
+ Address getAddress() const { return Addr; }
void setAddress(Address address) { Addr = address; }
@@ -503,8 +500,8 @@ public:
return R;
}
- RValue asAggregateRValue(CodeGenFunction &CGF) const {
- return RValue::getAggregate(getAddress(CGF), isVolatileQualified());
+ RValue asAggregateRValue() const {
+ return RValue::getAggregate(getAddress(), isVolatileQualified());
}
};
@@ -607,11 +604,11 @@ public:
}
static AggValueSlot
- forLValue(const LValue &LV, CodeGenFunction &CGF, IsDestructed_t isDestructed,
+ forLValue(const LValue &LV, IsDestructed_t isDestructed,
NeedsGCBarriers_t needsGC, IsAliased_t isAliased,
Overlap_t mayOverlap, IsZeroed_t isZeroed = IsNotZeroed,
IsSanitizerChecked_t isChecked = IsNotSanitizerChecked) {
- return forAddr(LV.getAddress(CGF), LV.getQuals(), isDestructed, needsGC,
+ return forAddr(LV.getAddress(), LV.getQuals(), isDestructed, needsGC,
isAliased, mayOverlap, isZeroed, isChecked);
}
diff --git a/clang/lib/CodeGen/CodeGenFunction.cpp b/clang/lib/CodeGen/CodeGenFunction.cpp
index 04abdadd9537..f0345f3b191b 100644
--- a/clang/lib/CodeGen/CodeGenFunction.cpp
+++ b/clang/lib/CodeGen/CodeGenFunction.cpp
@@ -2478,11 +2478,11 @@ void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
Address CodeGenFunction::EmitVAListRef(const Expr* E) {
if (getContext().getBuiltinVaListType()->isArrayType())
return EmitPointerWithAlignment(E);
- return EmitLValue(E).getAddress(*this);
+ return EmitLValue(E).getAddress();
}
Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
- return EmitLValue(E).getAddress(*this);
+ return EmitLValue(E).getAddress();
}
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 227813ad44e8..60ef28a0effa 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -5740,15 +5740,17 @@ CodeGenModule::getLLVMLinkageVarDefinition(const VarDecl *VD) {
static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
llvm::Function *newFn) {
// Fast path.
- if (old->use_empty()) return;
+ if (old->use_empty())
+ return;
llvm::Type *newRetTy = newFn->getReturnType();
- SmallVector<llvm::Value*, 4> newArgs;
+ SmallVector<llvm::Value *, 4> newArgs;
+
+ SmallVector<llvm::CallBase *> callSitesToBeRemovedFromParent;
for (llvm::Value::use_iterator ui = old->use_begin(), ue = old->use_end();
- ui != ue; ) {
- llvm::Value::use_iterator use = ui++; // Increment before the use is erased.
- llvm::User *user = use->getUser();
+ ui != ue; ui++) {
+ llvm::User *user = ui->getUser();
// Recognize and replace uses of bitcasts. Most calls to
// unprototyped functions will use bitcasts.
@@ -5760,8 +5762,9 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
// Recognize calls to the function.
llvm::CallBase *callSite = dyn_cast<llvm::CallBase>(user);
- if (!callSite) continue;
- if (!callSite->isCallee(&*use))
+ if (!callSite)
+ continue;
+ if (!callSite->isCallee(&*ui))
continue;
// If the return types don't match exactly, then we can't
@@ -5830,6 +5833,10 @@ static void replaceUsesOfNonProtoConstant(llvm::Constant *old,
if (callSite->getDebugLoc())
newCall->setDebugLoc(callSite->getDebugLoc());
+ callSitesToBeRemovedFromParent.push_back(callSite);
+ }
+
+ for (auto *callSite : callSitesToBeRemovedFromParent) {
callSite->eraseFromParent();
}
}
diff --git a/clang/lib/CodeGen/CodeGenTypes.cpp b/clang/lib/CodeGen/CodeGenTypes.cpp
index e8d75eda029e..0a926e4ac27f 100644
--- a/clang/lib/CodeGen/CodeGenTypes.cpp
+++ b/clang/lib/CodeGen/CodeGenTypes.cpp
@@ -523,8 +523,7 @@ llvm::Type *CodeGenTypes::ConvertType(QualType T) {
return llvm::StructType::get(getLLVMContext(), EltTys);
}
return llvm::ScalableVectorType::get(ConvertType(Info.ElementType),
- Info.EC.getKnownMinValue() *
- Info.NumVectors);
+ Info.EC.getKnownMinValue());
}
#define WASM_REF_TYPE(Name, MangledName, Id, SingletonId, AS) \
case BuiltinType::Id: { \
diff --git a/clang/lib/CodeGen/Targets/AArch64.cpp b/clang/lib/CodeGen/Targets/AArch64.cpp
index 0a4711fb2170..9aa3ea75681b 100644
--- a/clang/lib/CodeGen/Targets/AArch64.cpp
+++ b/clang/lib/CodeGen/Targets/AArch64.cpp
@@ -750,18 +750,7 @@ Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
// Again, stack arguments may need realignment. In this case both integer and
// floating-point ones might be affected.
if (!IsIndirect && TyAlign.getQuantity() > 8) {
- int Align = TyAlign.getQuantity();
-
- OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
-
- OnStackPtr = CGF.Builder.CreateAdd(
- OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
- "align_stack");
- OnStackPtr = CGF.Builder.CreateAnd(
- OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
- "align_stack");
-
- OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
+ OnStackPtr = emitRoundPointerUpToAlignment(CGF, OnStackPtr, TyAlign);
}
Address OnStackAddr = Address(OnStackPtr, CGF.Int8Ty,
std::max(CharUnits::fromQuantity(8), TyAlign));
diff --git a/clang/lib/CodeGen/Targets/NVPTX.cpp b/clang/lib/CodeGen/Targets/NVPTX.cpp
index 7dce5042c3dc..df798ce0ca67 100644
--- a/clang/lib/CodeGen/Targets/NVPTX.cpp
+++ b/clang/lib/CodeGen/Targets/NVPTX.cpp
@@ -85,7 +85,7 @@ private:
LValue Src) {
llvm::Value *Handle = nullptr;
llvm::Constant *C =
- llvm::dyn_cast<llvm::Constant>(Src.getAddress(CGF).emitRawPointer(CGF));
+ llvm::dyn_cast<llvm::Constant>(Src.getAddress().emitRawPointer(CGF));
// Lookup `addrspacecast` through the constant pointer if any.
if (auto *ASC = llvm::dyn_cast_or_null<llvm::AddrSpaceCastOperator>(C))
C = llvm::cast<llvm::Constant>(ASC->getPointerOperand());
diff --git a/clang/lib/CodeGen/Targets/X86.cpp b/clang/lib/CodeGen/Targets/X86.cpp
index 29d98aad8fcb..43dadf5e724a 100644
--- a/clang/lib/CodeGen/Targets/X86.cpp
+++ b/clang/lib/CodeGen/Targets/X86.cpp
@@ -327,7 +327,7 @@ void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
ResultTruncRegTypes.push_back(CoerceTy);
// Coerce the integer by bitcasting the return slot pointer.
- ReturnSlot.setAddress(ReturnSlot.getAddress(CGF).withElementType(CoerceTy));
+ ReturnSlot.setAddress(ReturnSlot.getAddress().withElementType(CoerceTy));
ResultRegDests.push_back(ReturnSlot);
rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
diff --git a/clang/lib/Driver/ToolChains/AIX.cpp b/clang/lib/Driver/ToolChains/AIX.cpp
index 85825e1ea65b..381d72e045b9 100644
--- a/clang/lib/Driver/ToolChains/AIX.cpp
+++ b/clang/lib/Driver/ToolChains/AIX.cpp
@@ -479,14 +479,6 @@ static void addTocDataOptions(const llvm::opt::ArgList &Args,
return false;
}();
- // Currently only supported for small code model.
- if (TOCDataGloballyinEffect &&
- (Args.getLastArgValue(options::OPT_mcmodel_EQ) == "large" ||
- Args.getLastArgValue(options::OPT_mcmodel_EQ) == "medium")) {
- D.Diag(clang::diag::warn_drv_unsupported_tocdata);
- return;
- }
-
enum TOCDataSetting {
AddressInTOC = 0, // Address of the symbol stored in the TOC.
DataInTOC = 1 // Symbol defined in the TOC.
diff --git a/clang/lib/ExtractAPI/DeclarationFragments.cpp b/clang/lib/ExtractAPI/DeclarationFragments.cpp
index 98b9343924a8..d88e4e0df8ef 100644
--- a/clang/lib/ExtractAPI/DeclarationFragments.cpp
+++ b/clang/lib/ExtractAPI/DeclarationFragments.cpp
@@ -999,11 +999,11 @@ DeclarationFragmentsBuilder::getFragmentsForTemplateParameters(
DeclarationFragments::FragmentKind::GenericParameter);
if (TemplateParam->hasDefaultArgument()) {
- DeclarationFragments After;
+ const auto Default = TemplateParam->getDefaultArgument();
Fragments.append(" = ", DeclarationFragments::FragmentKind::Text)
- .append(getFragmentsForType(TemplateParam->getDefaultArgument(),
- TemplateParam->getASTContext(), After));
- Fragments.append(std::move(After));
+ .append(getFragmentsForTemplateArguments(
+ {Default.getArgument()}, TemplateParam->getASTContext(),
+ {Default}));
}
} else if (const auto *NTP =
dyn_cast<NonTypeTemplateParmDecl>(ParameterArray[i])) {
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 52005a6c881f..9cba0c2614ee 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -28,7 +28,7 @@
using clang::format::FormatStyle;
-LLVM_YAML_IS_SEQUENCE_VECTOR(clang::format::FormatStyle::RawStringFormat)
+LLVM_YAML_IS_SEQUENCE_VECTOR(FormatStyle::RawStringFormat)
namespace llvm {
namespace yaml {
@@ -1235,7 +1235,7 @@ std::error_code make_error_code(ParseError e) {
return std::error_code(static_cast<int>(e), getParseCategory());
}
-inline llvm::Error make_string_error(const llvm::Twine &Message) {
+inline llvm::Error make_string_error(const Twine &Message) {
return llvm::make_error<llvm::StringError>(Message,
llvm::inconvertibleErrorCode());
}
@@ -2361,7 +2361,7 @@ private:
// FIXME: handle error. For now, print error message and skip the
// replacement for release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
};
@@ -2802,7 +2802,7 @@ private:
// FIXME: better error handling. for now just print error message and skip
// for the release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false && "Fixes must not conflict!");
}
Idx = End + 1;
@@ -3074,7 +3074,7 @@ static void sortCppIncludes(const FormatStyle &Style,
llvm::to_vector<16>(llvm::seq<unsigned>(0, Includes.size()));
if (Style.SortIncludes == FormatStyle::SI_CaseInsensitive) {
- llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
const auto LHSFilenameLower = Includes[LHSI].Filename.lower();
const auto RHSFilenameLower = Includes[RHSI].Filename.lower();
return std::tie(Includes[LHSI].Priority, LHSFilenameLower,
@@ -3083,7 +3083,7 @@ static void sortCppIncludes(const FormatStyle &Style,
Includes[RHSI].Filename);
});
} else {
- llvm::stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ stable_sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
return std::tie(Includes[LHSI].Priority, Includes[LHSI].Filename) <
std::tie(Includes[RHSI].Priority, Includes[RHSI].Filename);
});
@@ -3115,7 +3115,7 @@ static void sortCppIncludes(const FormatStyle &Style,
// enough as additional newlines might be added or removed across #include
// blocks. This we handle below by generating the updated #include blocks and
// comparing it to the original.
- if (Indices.size() == Includes.size() && llvm::is_sorted(Indices) &&
+ if (Indices.size() == Includes.size() && is_sorted(Indices) &&
Style.IncludeStyle.IncludeBlocks == tooling::IncludeStyle::IBS_Preserve) {
return;
}
@@ -3154,7 +3154,7 @@ static void sortCppIncludes(const FormatStyle &Style,
// FIXME: better error handling. For now, just skip the replacement for the
// release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
}
@@ -3307,7 +3307,7 @@ static void sortJavaImports(const FormatStyle &Style,
bool StaticImportAfterNormalImport =
Style.SortJavaStaticImport == FormatStyle::SJSIO_After;
- llvm::sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
+ sort(Indices, [&](unsigned LHSI, unsigned RHSI) {
// Negating IsStatic to push static imports above non-static imports.
return std::make_tuple(!Imports[LHSI].IsStatic ^
StaticImportAfterNormalImport,
@@ -3357,7 +3357,7 @@ static void sortJavaImports(const FormatStyle &Style,
// FIXME: better error handling. For now, just skip the replacement for the
// release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
}
@@ -3451,7 +3451,7 @@ tooling::Replacements sortIncludes(const FormatStyle &Style, StringRef Code,
}
template <typename T>
-static llvm::Expected<tooling::Replacements>
+static Expected<tooling::Replacements>
processReplacements(T ProcessFunc, StringRef Code,
const tooling::Replacements &Replaces,
const FormatStyle &Style) {
@@ -3470,7 +3470,7 @@ processReplacements(T ProcessFunc, StringRef Code,
return Replaces.merge(FormatReplaces);
}
-llvm::Expected<tooling::Replacements>
+Expected<tooling::Replacements>
formatReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style) {
// We need to use lambda function here since there are two versions of
@@ -3515,13 +3515,13 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
return Replaces;
tooling::Replacements HeaderInsertions;
- std::set<llvm::StringRef> HeadersToDelete;
+ std::set<StringRef> HeadersToDelete;
tooling::Replacements Result;
for (const auto &R : Replaces) {
if (isHeaderInsertion(R)) {
// Replacements from \p Replaces must be conflict-free already, so we can
// simply consume the error.
- llvm::consumeError(HeaderInsertions.add(R));
+ consumeError(HeaderInsertions.add(R));
} else if (isHeaderDeletion(R)) {
HeadersToDelete.insert(R.getReplacementText());
} else if (R.getOffset() == UINT_MAX) {
@@ -3529,7 +3529,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
"not supported! "
<< R.getReplacementText() << "\n";
} else {
- llvm::consumeError(Result.add(R));
+ consumeError(Result.add(R));
}
}
if (HeaderInsertions.empty() && HeadersToDelete.empty())
@@ -3546,13 +3546,12 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
if (Err) {
// Ignore the deletion on conflict.
llvm::errs() << "Failed to add header deletion replacement for "
- << Header << ": " << llvm::toString(std::move(Err))
- << "\n";
+ << Header << ": " << toString(std::move(Err)) << "\n";
}
}
}
- llvm::SmallVector<StringRef, 4> Matches;
+ SmallVector<StringRef, 4> Matches;
for (const auto &R : HeaderInsertions) {
auto IncludeDirective = R.getReplacementText();
bool Matched =
@@ -3567,7 +3566,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
if (Replace) {
auto Err = Result.add(*Replace);
if (Err) {
- llvm::consumeError(std::move(Err));
+ consumeError(std::move(Err));
unsigned NewOffset =
Result.getShiftedCodePosition(Replace->getOffset());
auto Shifted = tooling::Replacement(FileName, NewOffset, 0,
@@ -3581,7 +3580,7 @@ fixCppIncludeInsertions(StringRef Code, const tooling::Replacements &Replaces,
} // anonymous namespace
-llvm::Expected<tooling::Replacements>
+Expected<tooling::Replacements>
cleanupAroundReplacements(StringRef Code, const tooling::Replacements &Replaces,
const FormatStyle &Style) {
// We need to use lambda function here since there are two versions of
@@ -3774,7 +3773,7 @@ reformat(const FormatStyle &Style, StringRef Code,
auto Err = NonNoOpFixes.add(Fix);
if (Err) {
llvm::errs() << "Error adding replacements : "
- << llvm::toString(std::move(Err)) << "\n";
+ << toString(std::move(Err)) << "\n";
}
}
}
@@ -3956,17 +3955,16 @@ loadAndParseConfigFile(StringRef ConfigFile, llvm::vfs::FileSystem *FS,
return Text;
}
-llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
- StringRef FallbackStyleName,
- StringRef Code, llvm::vfs::FileSystem *FS,
- bool AllowUnknownOptions) {
+Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
+ StringRef FallbackStyleName, StringRef Code,
+ llvm::vfs::FileSystem *FS,
+ bool AllowUnknownOptions) {
FormatStyle Style = getLLVMStyle(guessLanguage(FileName, Code));
FormatStyle FallbackStyle = getNoStyle();
if (!getPredefinedStyle(FallbackStyleName, Style.Language, &FallbackStyle))
return make_string_error("Invalid fallback style: " + FallbackStyleName);
- llvm::SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 1>
- ChildFormatTextToApply;
+ SmallVector<std::unique_ptr<llvm::MemoryBuffer>, 1> ChildFormatTextToApply;
if (StyleName.starts_with("{")) {
// Parse YAML/JSON style from the command line.
@@ -4041,7 +4039,7 @@ llvm::Expected<FormatStyle> getStyle(StringRef StyleName, StringRef FileName,
};
// Look for .clang-format/_clang-format file in the file's parent directories.
- llvm::SmallVector<std::string, 2> FilesToLookFor;
+ SmallVector<std::string, 2> FilesToLookFor;
FilesToLookFor.push_back(".clang-format");
FilesToLookFor.push_back("_clang-format");
diff --git a/clang/lib/Format/FormatTokenSource.h b/clang/lib/Format/FormatTokenSource.h
index 2b93f302d360..8f00e5f4582c 100644
--- a/clang/lib/Format/FormatTokenSource.h
+++ b/clang/lib/Format/FormatTokenSource.h
@@ -173,7 +173,7 @@ private:
return Next;
}
- void dbgToken(int Position, llvm::StringRef Indent = "") {
+ void dbgToken(int Position, StringRef Indent = "") {
FormatToken *Tok = Tokens[Position];
llvm::dbgs() << Indent << "[" << Position
<< "] Token: " << Tok->Tok.getName() << " / " << Tok->TokenText
diff --git a/clang/lib/Format/MacroExpander.cpp b/clang/lib/Format/MacroExpander.cpp
index 5a1cdd884c5e..5768ff37fefc 100644
--- a/clang/lib/Format/MacroExpander.cpp
+++ b/clang/lib/Format/MacroExpander.cpp
@@ -119,7 +119,7 @@ private:
};
MacroExpander::MacroExpander(
- const std::vector<std::string> &Macros, clang::SourceManager &SourceMgr,
+ const std::vector<std::string> &Macros, SourceManager &SourceMgr,
const FormatStyle &Style,
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
IdentifierTable &IdentTable)
@@ -134,7 +134,7 @@ MacroExpander::~MacroExpander() = default;
void MacroExpander::parseDefinition(const std::string &Macro) {
Buffers.push_back(
llvm::MemoryBuffer::getMemBufferCopy(Macro, "<scratch space>"));
- clang::FileID FID = SourceMgr.createFileID(Buffers.back()->getMemBufferRef());
+ FileID FID = SourceMgr.createFileID(Buffers.back()->getMemBufferRef());
FormatTokenLexer Lex(SourceMgr, FID, 0, Style, encoding::Encoding_UTF8,
Allocator, IdentTable);
const auto Tokens = Lex.lex();
@@ -150,20 +150,20 @@ void MacroExpander::parseDefinition(const std::string &Macro) {
}
}
-bool MacroExpander::defined(llvm::StringRef Name) const {
+bool MacroExpander::defined(StringRef Name) const {
return FunctionLike.contains(Name) || ObjectLike.contains(Name);
}
-bool MacroExpander::objectLike(llvm::StringRef Name) const {
+bool MacroExpander::objectLike(StringRef Name) const {
return ObjectLike.contains(Name);
}
-bool MacroExpander::hasArity(llvm::StringRef Name, unsigned Arity) const {
+bool MacroExpander::hasArity(StringRef Name, unsigned Arity) const {
auto it = FunctionLike.find(Name);
return it != FunctionLike.end() && it->second.contains(Arity);
}
-llvm::SmallVector<FormatToken *, 8>
+SmallVector<FormatToken *, 8>
MacroExpander::expand(FormatToken *ID,
std::optional<ArgsList> OptionalArgs) const {
if (OptionalArgs)
diff --git a/clang/lib/Format/Macros.h b/clang/lib/Format/Macros.h
index fb12d22299de..e05f734b0db8 100644
--- a/clang/lib/Format/Macros.h
+++ b/clang/lib/Format/Macros.h
@@ -79,7 +79,7 @@ struct UnwrappedLineNode;
///
class MacroExpander {
public:
- using ArgsList = llvm::ArrayRef<llvm::SmallVector<FormatToken *, 8>>;
+ using ArgsList = ArrayRef<SmallVector<FormatToken *, 8>>;
/// Construct a macro expander from a set of macro definitions.
/// Macro definitions must be encoded as UTF-8.
@@ -95,27 +95,27 @@ public:
/// Macros that cannot be parsed will be silently discarded.
///
MacroExpander(const std::vector<std::string> &Macros,
- clang::SourceManager &SourceMgr, const FormatStyle &Style,
+ SourceManager &SourceMgr, const FormatStyle &Style,
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator,
IdentifierTable &IdentTable);
~MacroExpander();
/// Returns whether any macro \p Name is defined, regardless of overloads.
- bool defined(llvm::StringRef Name) const;
+ bool defined(StringRef Name) const;
  /// Returns whether there is an object-like overload, i.e. where the macro
/// has no arguments and should not consume subsequent parentheses.
- bool objectLike(llvm::StringRef Name) const;
+ bool objectLike(StringRef Name) const;
/// Returns whether macro \p Name provides an overload with the given arity.
- bool hasArity(llvm::StringRef Name, unsigned Arity) const;
+ bool hasArity(StringRef Name, unsigned Arity) const;
/// Returns the expanded stream of format tokens for \p ID, where
/// each element in \p Args is a positional argument to the macro call.
/// If \p Args is not set, the object-like overload is used.
/// If \p Args is set, the overload with the arity equal to \c Args.size() is
/// used.
- llvm::SmallVector<FormatToken *, 8>
+ SmallVector<FormatToken *, 8>
expand(FormatToken *ID, std::optional<ArgsList> OptionalArgs) const;
private:
@@ -124,7 +124,7 @@ private:
void parseDefinition(const std::string &Macro);
- clang::SourceManager &SourceMgr;
+ SourceManager &SourceMgr;
const FormatStyle &Style;
llvm::SpecificBumpPtrAllocator<FormatToken> &Allocator;
IdentifierTable &IdentTable;
@@ -260,7 +260,7 @@ private:
LineNode() = default;
LineNode(FormatToken *Tok) : Tok(Tok) {}
FormatToken *Tok = nullptr;
- llvm::SmallVector<std::unique_ptr<ReconstructedLine>> Children;
+ SmallVector<std::unique_ptr<ReconstructedLine>> Children;
};
// Line in which we build up the resulting unwrapped line.
@@ -269,7 +269,7 @@ private:
struct ReconstructedLine {
explicit ReconstructedLine(unsigned Level) : Level(Level) {}
unsigned Level;
- llvm::SmallVector<std::unique_ptr<LineNode>> Tokens;
+ SmallVector<std::unique_ptr<LineNode>> Tokens;
};
// The line in which we collect the resulting reconstructed output.
@@ -285,7 +285,7 @@ private:
// Stack of currently "open" lines, where each line's predecessor's last
// token is the parent token for that line.
- llvm::SmallVector<ReconstructedLine *> ActiveReconstructedLines;
+ SmallVector<ReconstructedLine *> ActiveReconstructedLines;
// Maps from the expanded token to the token that takes its place in the
// reconstructed token stream in terms of parent-child relationships.
@@ -325,7 +325,7 @@ private:
};
// Stack of macro calls for which we're in the middle of an expansion.
- llvm::SmallVector<Expansion> ActiveExpansions;
+ SmallVector<Expansion> ActiveExpansions;
struct MacroCallState {
MacroCallState(ReconstructedLine *Line, FormatToken *ParentLastToken,
@@ -368,7 +368,7 @@ private:
// |- ,
// | \- <argument>
// \- )
- llvm::SmallVector<MacroCallState> MacroCallStructure;
+ SmallVector<MacroCallState> MacroCallStructure;
// Maps from identifier of the macro call to an unwrapped line containing
// all tokens of the macro call.
diff --git a/clang/lib/Format/SortJavaScriptImports.cpp b/clang/lib/Format/SortJavaScriptImports.cpp
index 1a6a1b19e702..1acce26ff279 100644
--- a/clang/lib/Format/SortJavaScriptImports.cpp
+++ b/clang/lib/Format/SortJavaScriptImports.cpp
@@ -34,8 +34,6 @@ namespace format {
class FormatTokenLexer;
-using clang::format::FormatStyle;
-
// An imported symbol in a JavaScript ES6 import/export, possibly aliased.
struct JsImportedSymbol {
StringRef Symbol;
@@ -178,7 +176,7 @@ public:
}
}
}
- llvm::StringRef PreviousText = getSourceText(InsertionPoint);
+ StringRef PreviousText = getSourceText(InsertionPoint);
if (ReferencesText == PreviousText)
return {Result, 0};
@@ -209,7 +207,7 @@ public:
// FIXME: better error handling. For now, just print error message and skip
// the replacement for the release version.
if (Err) {
- llvm::errs() << llvm::toString(std::move(Err)) << "\n";
+ llvm::errs() << toString(std::move(Err)) << "\n";
assert(false);
}
@@ -276,7 +274,7 @@ private:
SortChunk.push_back(*Start);
++Start;
}
- llvm::stable_sort(SortChunk);
+ stable_sort(SortChunk);
mergeModuleReferences(SortChunk);
ReferencesSorted.insert(ReferencesSorted.end(), SortChunk.begin(),
SortChunk.end());
@@ -334,10 +332,10 @@ private:
// Sort the individual symbols within the import.
// E.g. `import {b, a} from 'x';` -> `import {a, b} from 'x';`
SmallVector<JsImportedSymbol, 1> Symbols = Reference.Symbols;
- llvm::stable_sort(
- Symbols, [&](const JsImportedSymbol &LHS, const JsImportedSymbol &RHS) {
- return LHS.Symbol.compare_insensitive(RHS.Symbol) < 0;
- });
+ stable_sort(Symbols,
+ [&](const JsImportedSymbol &LHS, const JsImportedSymbol &RHS) {
+ return LHS.Symbol.compare_insensitive(RHS.Symbol) < 0;
+ });
if (!Reference.SymbolsMerged && Symbols == Reference.Symbols) {
// Symbols didn't change, just emit the entire module reference.
StringRef ReferenceStmt = getSourceText(Reference.Range);
@@ -349,7 +347,7 @@ private:
// ... then the references in order ...
if (!Symbols.empty()) {
Buffer += getSourceText(Symbols.front().Range);
- for (const JsImportedSymbol &Symbol : llvm::drop_begin(Symbols)) {
+ for (const JsImportedSymbol &Symbol : drop_begin(Symbols)) {
Buffer += ",";
Buffer += getSourceText(Symbol.Range);
}
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index b15a87327240..b6f7567adc14 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -1189,12 +1189,6 @@ void UnwrappedLineParser::parsePPDefine() {
return;
}
- if (FormatTok->is(tok::identifier) &&
- Tokens->peekNextToken()->is(tok::colon)) {
- nextToken();
- nextToken();
- }
-
// Errors during a preprocessor directive can only affect the layout of the
// preprocessor directive, and thus we ignore them. An alternative approach
// would be to use the same approach we use on the file level (no
@@ -1416,6 +1410,13 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
}
}
+static bool isAltOperator(const FormatToken &Tok) {
+ return isalpha(Tok.TokenText[0]) &&
+ Tok.isOneOf(tok::ampamp, tok::ampequal, tok::amp, tok::pipe,
+ tok::tilde, tok::exclaim, tok::exclaimequal, tok::pipepipe,
+ tok::pipeequal, tok::caret, tok::caretequal);
+}
+
void UnwrappedLineParser::parseStructuralElement(
const FormatToken *OpeningBrace, IfStmtKind *IfKind,
FormatToken **IfLeftBrace, bool *HasDoWhile, bool *HasLabel) {
@@ -1681,7 +1682,8 @@ void UnwrappedLineParser::parseStructuralElement(
if (!Style.isJavaScript() && !Style.isVerilog() && !Style.isTableGen() &&
Tokens->peekNextToken()->is(tok::colon) && !Line->MustBeDeclaration) {
nextToken();
- Line->Tokens.begin()->Tok->MustBreakBefore = true;
+ if (!Line->InMacroBody || CurrentLines->size() > 1)
+ Line->Tokens.begin()->Tok->MustBreakBefore = true;
FormatTok->setFinalizedType(TT_GotoLabelColon);
parseLabel(!Style.IndentGotoLabels);
if (HasLabel)
@@ -1694,9 +1696,15 @@ void UnwrappedLineParser::parseStructuralElement(
break;
}
- const bool InRequiresExpression =
- OpeningBrace && OpeningBrace->is(TT_RequiresExpressionLBrace);
- do {
+ for (const bool InRequiresExpression =
+ OpeningBrace && OpeningBrace->is(TT_RequiresExpressionLBrace);
+ !eof();) {
+ if (IsCpp && isAltOperator(*FormatTok)) {
+ if (auto *Next = Tokens->peekNextToken(/*SkipComment=*/true);
+ Next && Next->isBinaryOperator()) {
+ FormatTok->Tok.setKind(tok::identifier);
+ }
+ }
const FormatToken *Previous = FormatTok->Previous;
switch (FormatTok->Tok.getKind()) {
case tok::at:
@@ -2127,7 +2135,7 @@ void UnwrappedLineParser::parseStructuralElement(
nextToken();
break;
}
- } while (!eof());
+ }
}
bool UnwrappedLineParser::tryToParsePropertyAccessor() {
diff --git a/clang/lib/Frontend/InitPreprocessor.cpp b/clang/lib/Frontend/InitPreprocessor.cpp
index c1d209466ffe..68760e00003e 100644
--- a/clang/lib/Frontend/InitPreprocessor.cpp
+++ b/clang/lib/Frontend/InitPreprocessor.cpp
@@ -1006,6 +1006,8 @@ static void InitializePredefinedMacros(const TargetInfo &TI,
else if (LangOpts.hasDWARFExceptions() &&
(TI.getTriple().isThumb() || TI.getTriple().isARM()))
Builder.defineMacro("__ARM_DWARF_EH__");
+ else if (LangOpts.hasWasmExceptions() && TI.getTriple().isWasm())
+ Builder.defineMacro("__WASM_EXCEPTIONS__");
if (LangOpts.Deprecated)
Builder.defineMacro("__DEPRECATED");
diff --git a/clang/lib/Index/IndexDecl.cpp b/clang/lib/Index/IndexDecl.cpp
index 8eb88f5a1e94..058f4aef918a 100644
--- a/clang/lib/Index/IndexDecl.cpp
+++ b/clang/lib/Index/IndexDecl.cpp
@@ -703,7 +703,8 @@ public:
IndexCtx.handleDecl(TP);
if (const auto *TTP = dyn_cast<TemplateTypeParmDecl>(TP)) {
if (TTP->hasDefaultArgument())
- IndexCtx.indexTypeSourceInfo(TTP->getDefaultArgumentInfo(), Parent);
+ handleTemplateArgumentLoc(TTP->getDefaultArgument(), Parent,
+ TP->getLexicalDeclContext());
if (auto *C = TTP->getTypeConstraint())
IndexCtx.handleReference(C->getNamedConcept(), C->getConceptNameLoc(),
Parent, TTP->getLexicalDeclContext());
diff --git a/clang/lib/Parse/ParseDecl.cpp b/clang/lib/Parse/ParseDecl.cpp
index 445d3fd66e38..651ef7b78c94 100644
--- a/clang/lib/Parse/ParseDecl.cpp
+++ b/clang/lib/Parse/ParseDecl.cpp
@@ -1923,9 +1923,8 @@ void Parser::DiagnoseCXX11AttributeExtension(ParsedAttributes &Attrs) {
// variable.
// This function moves attributes that should apply to the type off DS to Attrs.
void Parser::stripTypeAttributesOffDeclSpec(ParsedAttributes &Attrs,
- DeclSpec &DS,
- Sema::TagUseKind TUK) {
- if (TUK == Sema::TUK_Reference)
+ DeclSpec &DS, TagUseKind TUK) {
+ if (TUK == TagUseKind::Reference)
return;
llvm::SmallVector<ParsedAttr *, 1> ToBeMoved;
@@ -5287,9 +5286,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// enum foo {..}; void bar() { enum foo; } <- new foo in bar.
// enum foo {..}; void bar() { enum foo x; } <- use of old foo.
//
- Sema::TagUseKind TUK;
+ TagUseKind TUK;
if (AllowEnumSpecifier == AllowDefiningTypeSpec::No)
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
else if (Tok.is(tok::l_brace)) {
if (DS.isFriendSpecified()) {
Diag(Tok.getLocation(), diag::err_friend_decl_defines_type)
@@ -5301,9 +5300,9 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
ScopedEnumKWLoc = SourceLocation();
IsScopedUsingClassTag = false;
BaseType = TypeResult();
- TUK = Sema::TUK_Friend;
+ TUK = TagUseKind::Friend;
} else {
- TUK = Sema::TUK_Definition;
+ TUK = TagUseKind::Definition;
}
} else if (!isTypeSpecifier(DSC) &&
(Tok.is(tok::semi) ||
@@ -5312,7 +5311,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
// An opaque-enum-declaration is required to be standalone (no preceding or
// following tokens in the declaration). Sema enforces this separately by
// diagnosing anything else in the DeclSpec.
- TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
+ TUK = DS.isFriendSpecified() ? TagUseKind::Friend : TagUseKind::Declaration;
if (Tok.isNot(tok::semi)) {
// A semicolon was missing after this declaration. Diagnose and recover.
ExpectAndConsume(tok::semi, diag::err_expected_after, "enum");
@@ -5320,21 +5319,21 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
Tok.setKind(tok::semi);
}
} else {
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
}
bool IsElaboratedTypeSpecifier =
- TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend;
+ TUK == TagUseKind::Reference || TUK == TagUseKind::Friend;
// If this is an elaborated type specifier nested in a larger declaration,
// and we delayed diagnostics before, just merge them into the current pool.
- if (TUK == Sema::TUK_Reference && shouldDelayDiagsInTag) {
+ if (TUK == TagUseKind::Reference && shouldDelayDiagsInTag) {
diagsFromTag.redelay();
}
MultiTemplateParamsArg TParams;
if (TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate &&
- TUK != Sema::TUK_Reference) {
+ TUK != TagUseKind::Reference) {
if (!getLangOpts().CPlusPlus11 || !SS.isSet()) {
// Skip the rest of this declarator, up until the comma or semicolon.
Diag(Tok, diag::err_enum_template);
@@ -5355,7 +5354,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
SS.setTemplateParamLists(TParams);
}
- if (!Name && TUK != Sema::TUK_Definition) {
+ if (!Name && TUK != TagUseKind::Definition) {
Diag(Tok, diag::err_enumerator_unnamed_no_def);
DS.SetTypeSpecError();
@@ -5388,7 +5387,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
stripTypeAttributesOffDeclSpec(attrs, DS, TUK);
SkipBodyInfo SkipBody;
- if (!Name && TUK == Sema::TUK_Definition && Tok.is(tok::l_brace) &&
+ if (!Name && TUK == TagUseKind::Definition && Tok.is(tok::l_brace) &&
NextToken().is(tok::identifier))
SkipBody = Actions.shouldSkipAnonEnumBody(getCurScope(),
NextToken().getIdentifierInfo(),
@@ -5409,7 +5408,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
OffsetOfState, &SkipBody).get();
if (SkipBody.ShouldSkip) {
- assert(TUK == Sema::TUK_Definition && "can only skip a definition");
+ assert(TUK == TagUseKind::Definition && "can only skip a definition");
BalancedDelimiterTracker T(*this, tok::l_brace);
T.consumeOpen();
@@ -5451,7 +5450,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
if (!TagDecl) {
// The action failed to produce an enumeration tag. If this is a
// definition, consume the entire definition.
- if (Tok.is(tok::l_brace) && TUK != Sema::TUK_Reference) {
+ if (Tok.is(tok::l_brace) && TUK != TagUseKind::Reference) {
ConsumeBrace();
SkipUntil(tok::r_brace, StopAtSemi);
}
@@ -5460,7 +5459,7 @@ void Parser::ParseEnumSpecifier(SourceLocation StartLoc, DeclSpec &DS,
return;
}
- if (Tok.is(tok::l_brace) && TUK == Sema::TUK_Definition) {
+ if (Tok.is(tok::l_brace) && TUK == TagUseKind::Definition) {
Decl *D = SkipBody.CheckSameAsPrevious ? SkipBody.New : TagDecl;
ParseEnumBody(StartLoc, D);
if (SkipBody.CheckSameAsPrevious &&
diff --git a/clang/lib/Parse/ParseDeclCXX.cpp b/clang/lib/Parse/ParseDeclCXX.cpp
index 5eaec2b621e6..805651e4ab06 100644
--- a/clang/lib/Parse/ParseDeclCXX.cpp
+++ b/clang/lib/Parse/ParseDeclCXX.cpp
@@ -1961,11 +1961,11 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
MaybeParseCXX11Attributes(Attributes);
const PrintingPolicy &Policy = Actions.getASTContext().getPrintingPolicy();
- Sema::TagUseKind TUK;
+ TagUseKind TUK;
if (isDefiningTypeSpecifierContext(DSC, getLangOpts().CPlusPlus) ==
AllowDefiningTypeSpec::No ||
(getLangOpts().OpenMP && OpenMPDirectiveParsing))
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
else if (Tok.is(tok::l_brace) ||
(DSC != DeclSpecContext::DSC_association &&
getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
@@ -1980,10 +1980,10 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// Skip everything up to the semicolon, so that this looks like a proper
// friend class (or template thereof) declaration.
SkipUntil(tok::semi, StopBeforeMatch);
- TUK = Sema::TUK_Friend;
+ TUK = TagUseKind::Friend;
} else {
// Okay, this is a class definition.
- TUK = Sema::TUK_Definition;
+ TUK = TagUseKind::Definition;
}
} else if (isClassCompatibleKeyword() &&
(NextToken().is(tok::l_square) ||
@@ -2024,15 +2024,15 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
}
if (Tok.isOneOf(tok::l_brace, tok::colon))
- TUK = Sema::TUK_Definition;
+ TUK = TagUseKind::Definition;
else
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
PA.Revert();
} else if (!isTypeSpecifier(DSC) &&
(Tok.is(tok::semi) ||
(Tok.isAtStartOfLine() && !isValidAfterTypeSpecifier(false)))) {
- TUK = DS.isFriendSpecified() ? Sema::TUK_Friend : Sema::TUK_Declaration;
+ TUK = DS.isFriendSpecified() ? TagUseKind::Friend : TagUseKind::Declaration;
if (Tok.isNot(tok::semi)) {
const PrintingPolicy &PPol = Actions.getASTContext().getPrintingPolicy();
// A semicolon was missing after this declaration. Diagnose and recover.
@@ -2042,11 +2042,11 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
Tok.setKind(tok::semi);
}
} else
- TUK = Sema::TUK_Reference;
+ TUK = TagUseKind::Reference;
// Forbid misplaced attributes. In cases of a reference, we pass attributes
// to caller to handle.
- if (TUK != Sema::TUK_Reference) {
+ if (TUK != TagUseKind::Reference) {
// If this is not a reference, then the only possible
// valid place for C++11 attributes to appear here
// is between class-key and class-name. If there are
@@ -2072,7 +2072,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (!Name && !TemplateId &&
(DS.getTypeSpecType() == DeclSpec::TST_error ||
- TUK != Sema::TUK_Definition)) {
+ TUK != TagUseKind::Definition)) {
if (DS.getTypeSpecType() != DeclSpec::TST_error) {
// We have a declaration or reference to an anonymous class.
Diag(StartLoc, diag::err_anon_type_definition)
@@ -2082,7 +2082,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If we are parsing a definition and stop at a base-clause, continue on
// until the semicolon. Continuing from the comma will just trick us into
// thinking we are seeing a variable declaration.
- if (TUK == Sema::TUK_Definition && Tok.is(tok::colon))
+ if (TUK == TagUseKind::Definition && Tok.is(tok::colon))
SkipUntil(tok::semi, StopBeforeMatch);
else
SkipUntil(tok::comma, StopAtSemi);
@@ -2103,7 +2103,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
if (TemplateId->isInvalid()) {
// Can't build the declaration.
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
- TUK == Sema::TUK_Declaration) {
+ TUK == TagUseKind::Declaration) {
// This is an explicit instantiation of a class template.
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
@@ -2119,8 +2119,8 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// they have template headers, in which case they're ill-formed
// (FIXME: "template <class T> friend class A<T>::B<int>;").
// We diagnose this error in ActOnClassTemplateSpecialization.
- } else if (TUK == Sema::TUK_Reference ||
- (TUK == Sema::TUK_Friend &&
+ } else if (TUK == TagUseKind::Reference ||
+ (TUK == TagUseKind::Friend &&
TemplateInfo.Kind == ParsedTemplateInfo::NonTemplate)) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
@@ -2145,10 +2145,10 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
        // If this is a friend declaration, however, since it cannot have a
// template header, it is most likely that the user meant to
// remove the 'template' keyword.
- assert((TUK == Sema::TUK_Definition || TUK == Sema::TUK_Friend) &&
+ assert((TUK == TagUseKind::Definition || TUK == TagUseKind::Friend) &&
"Expected a definition here");
- if (TUK == Sema::TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
Diag(DS.getFriendSpecLoc(), diag::err_friend_explicit_instantiation);
TemplateParams = nullptr;
} else {
@@ -2179,7 +2179,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
&SkipBody);
}
} else if (TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation &&
- TUK == Sema::TUK_Declaration) {
+ TUK == TagUseKind::Declaration) {
// Explicit instantiation of a member of a class template
// specialization, e.g.,
//
@@ -2190,7 +2190,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
TagOrTempResult = Actions.ActOnExplicitInstantiation(
getCurScope(), TemplateInfo.ExternLoc, TemplateInfo.TemplateLoc,
TagType, StartLoc, SS, Name, NameLoc, attrs);
- } else if (TUK == Sema::TUK_Friend &&
+ } else if (TUK == TagUseKind::Friend &&
TemplateInfo.Kind != ParsedTemplateInfo::NonTemplate) {
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
@@ -2202,12 +2202,12 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
MultiTemplateParamsArg(TemplateParams ? &(*TemplateParams)[0] : nullptr,
TemplateParams ? TemplateParams->size() : 0));
} else {
- if (TUK != Sema::TUK_Declaration && TUK != Sema::TUK_Definition)
+ if (TUK != TagUseKind::Declaration && TUK != TagUseKind::Definition)
ProhibitCXX11Attributes(attrs, diag::err_attributes_not_allowed,
diag::err_keyword_not_allowed,
/* DiagnoseEmptyAttrs=*/true);
- if (TUK == Sema::TUK_Definition &&
+ if (TUK == TagUseKind::Definition &&
TemplateInfo.Kind == ParsedTemplateInfo::ExplicitInstantiation) {
// If the declarator-id is not a template-id, issue a diagnostic and
// recover by ignoring the 'template' keyword.
@@ -2222,7 +2222,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// reference. For example, we don't need the template parameters here:
// template <class T> class A *makeA(T t);
MultiTemplateParamsArg TParams;
- if (TUK != Sema::TUK_Reference && TemplateParams)
+ if (TUK != TagUseKind::Reference && TemplateParams)
TParams =
MultiTemplateParamsArg(&(*TemplateParams)[0], TemplateParams->size());
@@ -2241,7 +2241,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// If ActOnTag said the type was dependent, try again with the
// less common call.
if (IsDependent) {
- assert(TUK == Sema::TUK_Reference || TUK == Sema::TUK_Friend);
+ assert(TUK == TagUseKind::Reference || TUK == TagUseKind::Friend);
TypeResult = Actions.ActOnDependentTag(getCurScope(), TagType, TUK, SS,
Name, StartLoc, NameLoc);
}
@@ -2252,13 +2252,13 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
// just merge them into the current pool.
if (shouldDelayDiagsInTag) {
diagsFromTag.done();
- if (TUK == Sema::TUK_Reference &&
+ if (TUK == TagUseKind::Reference &&
TemplateInfo.Kind == ParsedTemplateInfo::Template)
diagsFromTag.redelay();
}
// If there is a body, parse it and inform the actions module.
- if (TUK == Sema::TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
assert(Tok.is(tok::l_brace) ||
(getLangOpts().CPlusPlus && Tok.is(tok::colon)) ||
isClassCompatibleKeyword());
@@ -2316,7 +2316,7 @@ void Parser::ParseClassSpecifier(tok::TokenKind TagTokKind,
//
// After a type-specifier, we don't expect a semicolon. This only happens in
// C, since definitions are not permitted in this context in C++.
- if (TUK == Sema::TUK_Definition &&
+ if (TUK == TagUseKind::Definition &&
(getLangOpts().CPlusPlus || !isTypeSpecifier(DSC)) &&
(TemplateInfo.Kind || !isValidAfterTypeSpecifier(false))) {
if (Tok.isNot(tok::semi)) {
diff --git a/clang/lib/Parse/ParseOpenACC.cpp b/clang/lib/Parse/ParseOpenACC.cpp
index 5db3036b0003..e9c60f76165b 100644
--- a/clang/lib/Parse/ParseOpenACC.cpp
+++ b/clang/lib/Parse/ParseOpenACC.cpp
@@ -920,7 +920,8 @@ Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams(
case OpenACCClauseKind::PresentOrCopyIn: {
bool IsReadOnly = tryParseAndConsumeSpecialTokenKind(
*this, OpenACCSpecialTokenKind::ReadOnly, ClauseKind);
- ParsedClause.setVarListDetails(ParseOpenACCVarList(), IsReadOnly,
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
+ IsReadOnly,
/*IsZero=*/false);
break;
}
@@ -932,16 +933,17 @@ Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams(
case OpenACCClauseKind::PresentOrCopyOut: {
bool IsZero = tryParseAndConsumeSpecialTokenKind(
*this, OpenACCSpecialTokenKind::Zero, ClauseKind);
- ParsedClause.setVarListDetails(ParseOpenACCVarList(),
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
/*IsReadOnly=*/false, IsZero);
break;
}
- case OpenACCClauseKind::Reduction:
+ case OpenACCClauseKind::Reduction: {
// If we're missing a clause-kind (or it is invalid), see if we can parse
// the var-list anyway.
- ParseReductionOperator(*this);
- ParseOpenACCVarList();
+ OpenACCReductionOperator Op = ParseReductionOperator(*this);
+ ParsedClause.setReductionDetails(Op, ParseOpenACCVarList(ClauseKind));
break;
+ }
case OpenACCClauseKind::Self:
// The 'self' clause is a var-list instead of a 'condition' in the case of
    // the 'update' clause, so we have to handle it here. Use an assert to
@@ -955,11 +957,11 @@ Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams(
case OpenACCClauseKind::Host:
case OpenACCClauseKind::Link:
case OpenACCClauseKind::UseDevice:
- ParseOpenACCVarList();
+ ParseOpenACCVarList(ClauseKind);
break;
case OpenACCClauseKind::Attach:
case OpenACCClauseKind::DevicePtr:
- ParsedClause.setVarListDetails(ParseOpenACCVarList(),
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
/*IsReadOnly=*/false, /*IsZero=*/false);
break;
case OpenACCClauseKind::Copy:
@@ -969,7 +971,7 @@ Parser::OpenACCClauseParseResult Parser::ParseOpenACCClauseParams(
case OpenACCClauseKind::NoCreate:
case OpenACCClauseKind::Present:
case OpenACCClauseKind::Private:
- ParsedClause.setVarListDetails(ParseOpenACCVarList(),
+ ParsedClause.setVarListDetails(ParseOpenACCVarList(ClauseKind),
/*IsReadOnly=*/false, /*IsZero=*/false);
break;
case OpenACCClauseKind::Collapse: {
@@ -1278,7 +1280,7 @@ ExprResult Parser::ParseOpenACCBindClauseArgument() {
/// - an array element
/// - a member of a composite variable
/// - a common block name between slashes (fortran only)
-Parser::OpenACCVarParseResult Parser::ParseOpenACCVar() {
+Parser::OpenACCVarParseResult Parser::ParseOpenACCVar(OpenACCClauseKind CK) {
OpenACCArraySectionRAII ArraySections(*this);
ExprResult Res = ParseAssignmentExpression();
@@ -1289,15 +1291,15 @@ Parser::OpenACCVarParseResult Parser::ParseOpenACCVar() {
if (!Res.isUsable())
return {Res, OpenACCParseCanContinue::Can};
- Res = getActions().OpenACC().ActOnVar(Res.get());
+ Res = getActions().OpenACC().ActOnVar(CK, Res.get());
return {Res, OpenACCParseCanContinue::Can};
}
-llvm::SmallVector<Expr *> Parser::ParseOpenACCVarList() {
+llvm::SmallVector<Expr *> Parser::ParseOpenACCVarList(OpenACCClauseKind CK) {
llvm::SmallVector<Expr *> Vars;
- auto [Res, CanContinue] = ParseOpenACCVar();
+ auto [Res, CanContinue] = ParseOpenACCVar(CK);
if (Res.isUsable()) {
Vars.push_back(Res.get());
} else if (CanContinue == OpenACCParseCanContinue::Cannot) {
@@ -1308,7 +1310,7 @@ llvm::SmallVector<Expr *> Parser::ParseOpenACCVarList() {
while (!getCurToken().isOneOf(tok::r_paren, tok::annot_pragma_openacc_end)) {
ExpectAndConsume(tok::comma);
- auto [Res, CanContinue] = ParseOpenACCVar();
+ auto [Res, CanContinue] = ParseOpenACCVar(CK);
if (Res.isUsable()) {
Vars.push_back(Res.get());
@@ -1342,7 +1344,7 @@ void Parser::ParseOpenACCCacheVarList() {
// ParseOpenACCVarList should leave us before a r-paren, so no need to skip
// anything here.
- ParseOpenACCVarList();
+ ParseOpenACCVarList(OpenACCClauseKind::Invalid);
}
Parser::OpenACCDirectiveParseInfo Parser::ParseOpenACCDirective() {
diff --git a/clang/lib/Sema/CMakeLists.txt b/clang/lib/Sema/CMakeLists.txt
index 58e0a3b9679b..6b7742cae2db 100644
--- a/clang/lib/Sema/CMakeLists.txt
+++ b/clang/lib/Sema/CMakeLists.txt
@@ -60,7 +60,7 @@ add_clang_library(clangSema
SemaOpenMP.cpp
SemaOverload.cpp
SemaPseudoObject.cpp
- SemaRISCVVectorLookup.cpp
+ SemaRISCV.cpp
SemaStmt.cpp
SemaStmtAsm.cpp
SemaStmtAttr.cpp
diff --git a/clang/lib/Sema/HLSLExternalSemaSource.cpp b/clang/lib/Sema/HLSLExternalSemaSource.cpp
index bb283c54b3d2..fa8c852ea9e9 100644
--- a/clang/lib/Sema/HLSLExternalSemaSource.cpp
+++ b/clang/lib/Sema/HLSLExternalSemaSource.cpp
@@ -308,17 +308,18 @@ struct BuiltinTypeDeclBuilder {
return *this;
}
- TemplateParameterListBuilder addTemplateArgumentList();
- BuiltinTypeDeclBuilder &addSimpleTemplateParams(ArrayRef<StringRef> Names);
+ TemplateParameterListBuilder addTemplateArgumentList(Sema &S);
+ BuiltinTypeDeclBuilder &addSimpleTemplateParams(Sema &S,
+ ArrayRef<StringRef> Names);
};
struct TemplateParameterListBuilder {
BuiltinTypeDeclBuilder &Builder;
- ASTContext &AST;
+ Sema &S;
llvm::SmallVector<NamedDecl *> Params;
- TemplateParameterListBuilder(BuiltinTypeDeclBuilder &RB)
- : Builder(RB), AST(RB.Record->getASTContext()) {}
+ TemplateParameterListBuilder(Sema &S, BuiltinTypeDeclBuilder &RB)
+ : Builder(RB), S(S) {}
~TemplateParameterListBuilder() { finalizeTemplateArgs(); }
@@ -328,12 +329,15 @@ struct TemplateParameterListBuilder {
return *this;
unsigned Position = static_cast<unsigned>(Params.size());
auto *Decl = TemplateTypeParmDecl::Create(
- AST, Builder.Record->getDeclContext(), SourceLocation(),
+ S.Context, Builder.Record->getDeclContext(), SourceLocation(),
SourceLocation(), /* TemplateDepth */ 0, Position,
- &AST.Idents.get(Name, tok::TokenKind::identifier), /* Typename */ false,
+ &S.Context.Idents.get(Name, tok::TokenKind::identifier),
+ /* Typename */ false,
/* ParameterPack */ false);
if (!DefaultValue.isNull())
- Decl->setDefaultArgument(AST.getTrivialTypeSourceInfo(DefaultValue));
+ Decl->setDefaultArgument(
+ S.Context, S.getTrivialTemplateArgumentLoc(DefaultValue, QualType(),
+ SourceLocation()));
Params.emplace_back(Decl);
return *this;
@@ -342,11 +346,11 @@ struct TemplateParameterListBuilder {
BuiltinTypeDeclBuilder &finalizeTemplateArgs() {
if (Params.empty())
return Builder;
- auto *ParamList =
- TemplateParameterList::Create(AST, SourceLocation(), SourceLocation(),
- Params, SourceLocation(), nullptr);
+ auto *ParamList = TemplateParameterList::Create(S.Context, SourceLocation(),
+ SourceLocation(), Params,
+ SourceLocation(), nullptr);
Builder.Template = ClassTemplateDecl::Create(
- AST, Builder.Record->getDeclContext(), SourceLocation(),
+ S.Context, Builder.Record->getDeclContext(), SourceLocation(),
DeclarationName(Builder.Record->getIdentifier()), ParamList,
Builder.Record);
Builder.Record->setDescribedClassTemplate(Builder.Template);
@@ -359,20 +363,22 @@ struct TemplateParameterListBuilder {
Params.clear();
QualType T = Builder.Template->getInjectedClassNameSpecialization();
- T = AST.getInjectedClassNameType(Builder.Record, T);
+ T = S.Context.getInjectedClassNameType(Builder.Record, T);
return Builder;
}
};
} // namespace
-TemplateParameterListBuilder BuiltinTypeDeclBuilder::addTemplateArgumentList() {
- return TemplateParameterListBuilder(*this);
+TemplateParameterListBuilder
+BuiltinTypeDeclBuilder::addTemplateArgumentList(Sema &S) {
+ return TemplateParameterListBuilder(S, *this);
}
BuiltinTypeDeclBuilder &
-BuiltinTypeDeclBuilder::addSimpleTemplateParams(ArrayRef<StringRef> Names) {
- TemplateParameterListBuilder Builder = this->addTemplateArgumentList();
+BuiltinTypeDeclBuilder::addSimpleTemplateParams(Sema &S,
+ ArrayRef<StringRef> Names) {
+ TemplateParameterListBuilder Builder = this->addTemplateArgumentList(S);
for (StringRef Name : Names)
Builder.addTypeParameter(Name);
return Builder.finalizeTemplateArgs();
@@ -426,7 +432,9 @@ void HLSLExternalSemaSource::defineHLSLVectorAlias() {
auto *TypeParam = TemplateTypeParmDecl::Create(
AST, HLSLNamespace, SourceLocation(), SourceLocation(), 0, 0,
&AST.Idents.get("element", tok::TokenKind::identifier), false, false);
- TypeParam->setDefaultArgument(AST.getTrivialTypeSourceInfo(AST.FloatTy));
+ TypeParam->setDefaultArgument(
+ AST, SemaPtr->getTrivialTemplateArgumentLoc(
+ TemplateArgument(AST.FloatTy), QualType(), SourceLocation()));
TemplateParams.emplace_back(TypeParam);
@@ -492,7 +500,7 @@ static BuiltinTypeDeclBuilder setupBufferType(CXXRecordDecl *Decl, Sema &S,
void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
CXXRecordDecl *Decl;
Decl = BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RWBuffer")
- .addSimpleTemplateParams({"element_type"})
+ .addSimpleTemplateParams(*SemaPtr, {"element_type"})
.Record;
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV,
@@ -503,7 +511,7 @@ void HLSLExternalSemaSource::defineHLSLTypesWithForwardDeclarations() {
Decl =
BuiltinTypeDeclBuilder(*SemaPtr, HLSLNamespace, "RasterizerOrderedBuffer")
- .addSimpleTemplateParams({"element_type"})
+ .addSimpleTemplateParams(*SemaPtr, {"element_type"})
.Record;
onCompletion(Decl, [this](CXXRecordDecl *Decl) {
setupBufferType(Decl, *SemaPtr, ResourceClass::UAV,
diff --git a/clang/lib/Sema/Sema.cpp b/clang/lib/Sema/Sema.cpp
index f847c49920cf..2c5774da3f66 100644
--- a/clang/lib/Sema/Sema.cpp
+++ b/clang/lib/Sema/Sema.cpp
@@ -49,6 +49,7 @@
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenACC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPseudoObject.h"
#include "clang/Sema/SemaSYCL.h"
#include "clang/Sema/TemplateDeduction.h"
#include "clang/Sema/TemplateInstCallback.h"
@@ -210,6 +211,7 @@ Sema::Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
ObjCPtr(std::make_unique<SemaObjC>(*this)),
OpenACCPtr(std::make_unique<SemaOpenACC>(*this)),
OpenMPPtr(std::make_unique<SemaOpenMP>(*this)),
+ PseudoObjectPtr(std::make_unique<SemaPseudoObject>(*this)),
SYCLPtr(std::make_unique<SemaSYCL>(*this)),
MSPointerToMemberRepresentationMethod(
LangOpts.getMSPointerToMemberRepresentationMethod()),
diff --git a/clang/lib/Sema/SemaCXXScopeSpec.cpp b/clang/lib/Sema/SemaCXXScopeSpec.cpp
index fca5bd131bbc..c405fbc0aa42 100644
--- a/clang/lib/Sema/SemaCXXScopeSpec.cpp
+++ b/clang/lib/Sema/SemaCXXScopeSpec.cpp
@@ -796,6 +796,14 @@ bool Sema::BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo,
Diag(IdInfo.IdentifierLoc,
diag::ext_undeclared_unqual_id_with_dependent_base)
<< IdInfo.Identifier << ContainingClass;
+ // Fake up a nested-name-specifier that starts with the
+ // injected-class-name of the enclosing class.
+ QualType T = Context.getTypeDeclType(ContainingClass);
+ TypeLocBuilder TLB;
+ TLB.pushTrivial(Context, T, IdInfo.IdentifierLoc);
+ SS.Extend(Context, /*TemplateKWLoc=*/SourceLocation(),
+ TLB.getTypeLocInContext(Context, T), IdInfo.IdentifierLoc);
+ // Add the identifier to form a dependent name.
SS.Extend(Context, IdInfo.Identifier, IdInfo.IdentifierLoc,
IdInfo.CCLoc);
return false;
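Illustration (not part of the patch's tests): a hypothetical -fms-compatibility input that reaches this path. 'Inner' is not declared in 'Derived' itself, but a dependent base may provide it, so the nested-name-specifier is now rebuilt starting from the injected-class-name instead of the bare identifier.

    template <typename T> struct Base { struct Inner { static int n; }; };
    template <typename T> struct Derived : Base<T> {
      int get() {
        // warning (ext_undeclared_unqual_id_with_dependent_base): the specifier
        // is now formed as 'Derived<T>::Inner::' rather than just 'Inner::'.
        return Inner::n;
      }
    };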
diff --git a/clang/lib/Sema/SemaDecl.cpp b/clang/lib/Sema/SemaDecl.cpp
index 557fe10619c3..8049ddfed872 100644
--- a/clang/lib/Sema/SemaDecl.cpp
+++ b/clang/lib/Sema/SemaDecl.cpp
@@ -4985,7 +4985,7 @@ void Sema::setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
if (TagFromDeclSpec->hasNameForLinkage())
return;
- // A well-formed anonymous tag must always be a TUK_Definition.
+ // A well-formed anonymous tag must always be a TagUseKind::Definition.
assert(TagFromDeclSpec->isThisDeclarationADefinition());
// The type must match the tag exactly; no qualifiers allowed.
@@ -9217,19 +9217,20 @@ static NamedDecl *DiagnoseInvalidRedeclaration(
<< Idx << FDParam->getType()
<< NewFD->getParamDecl(Idx - 1)->getType();
} else if (FDisConst != NewFDisConst) {
- SemaRef.Diag(FD->getLocation(), diag::note_member_def_close_const_match)
- << NewFDisConst << FD->getSourceRange().getEnd()
- << (NewFDisConst
- ? FixItHint::CreateRemoval(ExtraArgs.D.getFunctionTypeInfo()
- .getConstQualifierLoc())
- : FixItHint::CreateInsertion(ExtraArgs.D.getFunctionTypeInfo()
- .getRParenLoc()
- .getLocWithOffset(1),
- " const"));
- } else
+ auto DB = SemaRef.Diag(FD->getLocation(),
+ diag::note_member_def_close_const_match)
+ << NewFDisConst << FD->getSourceRange().getEnd();
+ if (const auto &FTI = ExtraArgs.D.getFunctionTypeInfo(); !NewFDisConst)
+ DB << FixItHint::CreateInsertion(FTI.getRParenLoc().getLocWithOffset(1),
+ " const");
+ else if (FTI.hasMethodTypeQualifiers() &&
+ FTI.getConstQualifierLoc().isValid())
+ DB << FixItHint::CreateRemoval(FTI.getConstQualifierLoc());
+ } else {
SemaRef.Diag(FD->getLocation(),
IsMember ? diag::note_member_def_close_match
: diag::note_local_decl_close_match);
+ }
}
return nullptr;
}
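Illustration (mine, not from the patch): the kind of near-match redeclaration this note and fix-it apply to.

    struct S {
      void f() const;   // only a const overload is declared
    };
    void S::f() {}      // error: out-of-line definition does not match any declaration
    // note: member declaration nearly matches; the fix-it inserts " const" after ')'.
    // In the opposite case (definition const, declaration not), the removal fix-it
    // is now emitted only when a valid const-qualifier location is available.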
@@ -17237,9 +17238,9 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
OffsetOfKind OOK, SkipBodyInfo *SkipBody) {
// If this is not a definition, it must have a name.
IdentifierInfo *OrigName = Name;
- assert((Name != nullptr || TUK == TUK_Definition) &&
+ assert((Name != nullptr || TUK == TagUseKind::Definition) &&
"Nameless record must be a definition!");
- assert(TemplateParameterLists.size() == 0 || TUK != TUK_Reference);
+ assert(TemplateParameterLists.size() == 0 || TUK != TagUseKind::Reference);
OwnedDecl = false;
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
@@ -17253,11 +17254,11 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// or a scope specifier, which also conveniently avoids this work
// for non-C++ cases.
if (TemplateParameterLists.size() > 0 ||
- (SS.isNotEmpty() && TUK != TUK_Reference)) {
+ (SS.isNotEmpty() && TUK != TagUseKind::Reference)) {
TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
KWLoc, NameLoc, SS, nullptr, TemplateParameterLists,
- TUK == TUK_Friend, isMemberSpecialization, Invalid);
+ TUK == TagUseKind::Friend, isMemberSpecialization, Invalid);
// C++23 [dcl.type.elab] p2:
// If an elaborated-type-specifier is the sole constituent of a
@@ -17272,7 +17273,8 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// FIXME: Class template partial specializations can be forward declared
// per CWG2213, but the resolution failed to allow qualified forward
// declarations. This is almost certainly unintentional, so we allow them.
- if (TUK == TUK_Declaration && SS.isNotEmpty() && !isMemberSpecialization)
+ if (TUK == TagUseKind::Declaration && SS.isNotEmpty() &&
+ !isMemberSpecialization)
Diag(SS.getBeginLoc(), diag::err_standalone_class_nested_name_specifier)
<< TypeWithKeyword::getTagTypeKindName(Kind) << SS.getRange();
@@ -17309,7 +17311,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
return true;
}
- if (TUK == TUK_Friend && Kind == TagTypeKind::Enum) {
+ if (TUK == TagUseKind::Friend && Kind == TagTypeKind::Enum) {
// C++23 [dcl.type.elab]p4:
// If an elaborated-type-specifier appears with the friend specifier as
// an entire member-declaration, the member-declaration shall have one
@@ -17360,7 +17362,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// of 'int'. However, if this is an unfixed forward declaration, don't set
// the underlying type unless the user enables -fms-compatibility. This
// makes unfixed forward declared enums incomplete and is more conforming.
- if (TUK == TUK_Definition || getLangOpts().MSVCCompat)
+ if (TUK == TagUseKind::Definition || getLangOpts().MSVCCompat)
EnumUnderlying = Context.IntTy.getTypePtr();
}
}
@@ -17371,7 +17373,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
bool isStdAlignValT = false;
RedeclarationKind Redecl = forRedeclarationInCurContext();
- if (TUK == TUK_Friend || TUK == TUK_Reference)
+ if (TUK == TagUseKind::Friend || TUK == TagUseKind::Reference)
Redecl = RedeclarationKind::NotForRedeclaration;
/// Create a new tag decl in C/ObjC. Since the ODR-like semantics for ObjC/C
@@ -17389,7 +17391,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
New = EnumDecl::Create(Context, SearchDC, KWLoc, Loc, Name, nullptr,
ScopedEnum, ScopedEnumUsesClassTag, IsFixed);
// If this is an undefined enum, bail.
- if (TUK != TUK_Definition && !Invalid)
+ if (TUK != TagUseKind::Definition && !Invalid)
return nullptr;
if (EnumUnderlying) {
EnumDecl *ED = cast<EnumDecl>(New);
@@ -17417,7 +17419,8 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition &&
+ (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
@@ -17438,7 +17441,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// If this is a friend or a reference to a class in a dependent
// context, don't try to make a decl for it.
- if (TUK == TUK_Friend || TUK == TUK_Reference) {
+ if (TUK == TagUseKind::Friend || TUK == TagUseKind::Reference) {
DC = computeDeclContext(SS, false);
if (!DC) {
IsDependent = true;
@@ -17471,7 +17474,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// this as a dependent elaborated-type-specifier.
// But this only makes any sense for reference-like lookups.
if (Previous.wasNotFoundInCurrentInstantiation() &&
- (TUK == TUK_Reference || TUK == TUK_Friend)) {
+ (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend)) {
IsDependent = true;
return true;
}
@@ -17488,7 +17491,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// If T is the name of a class, then each of the following shall have a
// name different from T:
// -- every member of class T that is itself a type
- if (TUK != TUK_Reference && TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Reference && TUK != TagUseKind::Friend &&
DiagnoseClassNameShadow(SearchDC, DeclarationNameInfo(Name, NameLoc)))
return true;
@@ -17502,7 +17505,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// When declaring or defining a tag, ignore ambiguities introduced
// by types using'ed into this scope.
if (Previous.isAmbiguous() &&
- (TUK == TUK_Definition || TUK == TUK_Declaration)) {
+ (TUK == TagUseKind::Definition || TUK == TagUseKind::Declaration)) {
LookupResult::Filter F = Previous.makeFilter();
while (F.hasNext()) {
NamedDecl *ND = F.next();
@@ -17526,7 +17529,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
//
// Does it matter that this should be by scope instead of by
// semantic context?
- if (!Previous.empty() && TUK == TUK_Friend) {
+ if (!Previous.empty() && TUK == TagUseKind::Friend) {
DeclContext *EnclosingNS = SearchDC->getEnclosingNamespaceContext();
LookupResult::Filter F = Previous.makeFilter();
bool FriendSawTagOutsideEnclosingNamespace = false;
@@ -17556,7 +17559,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (Previous.isAmbiguous())
return true;
- if (!getLangOpts().CPlusPlus && TUK != TUK_Reference) {
+ if (!getLangOpts().CPlusPlus && TUK != TagUseKind::Reference) {
// FIXME: This makes sure that we ignore the contexts associated
// with C structs, unions, and enums when looking for a matching
// tag declaration or definition. See the similar lookup tweak
@@ -17608,11 +17611,12 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// also need to do a redeclaration lookup there, just in case
// there's a shadow friend decl.
if (Name && Previous.empty() &&
- (TUK == TUK_Reference || TUK == TUK_Friend || IsTemplateParamOrArg)) {
+ (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend ||
+ IsTemplateParamOrArg)) {
if (Invalid) goto CreateNewDecl;
assert(SS.isEmpty());
- if (TUK == TUK_Reference || IsTemplateParamOrArg) {
+ if (TUK == TagUseKind::Reference || IsTemplateParamOrArg) {
// C++ [basic.scope.pdecl]p5:
// -- for an elaborated-type-specifier of the form
//
@@ -17646,7 +17650,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Find the scope where we'll be declaring the tag.
S = getTagInjectionScope(S, getLangOpts());
} else {
- assert(TUK == TUK_Friend);
+ assert(TUK == TagUseKind::Friend);
CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(SearchDC);
// C++ [namespace.memdef]p3:
@@ -17711,7 +17715,8 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// redefinition if either context is within the other.
if (auto *Shadow = dyn_cast<UsingShadowDecl>(DirectPrevDecl)) {
auto *OldTag = dyn_cast<TagDecl>(PrevDecl);
- if (SS.isEmpty() && TUK != TUK_Reference && TUK != TUK_Friend &&
+ if (SS.isEmpty() && TUK != TagUseKind::Reference &&
+ TUK != TagUseKind::Friend &&
isDeclInScope(Shadow, SearchDC, S, isMemberSpecialization) &&
!(OldTag && isAcceptableTagRedeclContext(
*this, OldTag->getDeclContext(), SearchDC))) {
@@ -17730,13 +17735,13 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// If this is a use of a previous tag, or if the tag is already declared
// in the same scope (so that the definition/declaration completes or
// rementions the tag), reuse the decl.
- if (TUK == TUK_Reference || TUK == TUK_Friend ||
+ if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend ||
isDeclInScope(DirectPrevDecl, SearchDC, S,
SS.isNotEmpty() || isMemberSpecialization)) {
// Make sure that this wasn't declared as an enum and now used as a
// struct or something similar.
if (!isAcceptableTagRedeclaration(PrevTagDecl, Kind,
- TUK == TUK_Definition, KWLoc,
+ TUK == TagUseKind::Definition, KWLoc,
Name)) {
bool SafeToContinue =
(PrevTagDecl->getTagKind() != TagTypeKind::Enum &&
@@ -17763,7 +17768,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (Kind == TagTypeKind::Enum &&
PrevTagDecl->getTagKind() == TagTypeKind::Enum) {
const EnumDecl *PrevEnum = cast<EnumDecl>(PrevTagDecl);
- if (TUK == TUK_Reference || TUK == TUK_Friend)
+ if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend)
return PrevTagDecl;
QualType EnumUnderlyingTy;
@@ -17778,14 +17783,14 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (CheckEnumRedeclaration(NameLoc.isValid() ? NameLoc : KWLoc,
ScopedEnum, EnumUnderlyingTy,
IsFixed, PrevEnum))
- return TUK == TUK_Declaration ? PrevTagDecl : nullptr;
+ return TUK == TagUseKind::Declaration ? PrevTagDecl : nullptr;
}
// C++11 [class.mem]p1:
// A member shall not be declared twice in the member-specification,
// except that a nested class or member class template can be declared
// and then later defined.
- if (TUK == TUK_Declaration && PrevDecl->isCXXClassMember() &&
+ if (TUK == TagUseKind::Declaration && PrevDecl->isCXXClassMember() &&
S->isDeclScope(PrevDecl)) {
Diag(NameLoc, diag::ext_member_redeclared);
Diag(PrevTagDecl->getLocation(), diag::note_previous_declaration);
@@ -17794,11 +17799,11 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
if (!Invalid) {
// If this is a use, just return the declaration we found, unless
// we have attributes.
- if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend) {
if (!Attrs.empty()) {
// FIXME: Diagnose these attributes. For now, we create a new
// declaration to hold them.
- } else if (TUK == TUK_Reference &&
+ } else if (TUK == TagUseKind::Reference &&
(PrevTagDecl->getFriendObjectKind() ==
Decl::FOK_Undeclared ||
PrevDecl->getOwningModule() != getCurrentModule()) &&
@@ -17822,7 +17827,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
}
// Diagnose attempts to redefine a tag.
- if (TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
if (NamedDecl *Def = PrevTagDecl->getDefinition()) {
// If we're defining a specialization and the previous definition
// is from an implicit instantiation, don't emit an error
@@ -17902,7 +17907,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Okay, we're going to make a redeclaration. If this is some kind
// of reference, make sure we build the redeclaration in the same DC
// as the original, and ignore the current access specifier.
- if (TUK == TUK_Friend || TUK == TUK_Reference) {
+ if (TUK == TagUseKind::Friend || TUK == TagUseKind::Reference) {
SearchDC = PrevTagDecl->getDeclContext();
AS = AS_none;
}
@@ -17928,7 +17933,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// Use a better diagnostic if an elaborated-type-specifier
// found the wrong kind of type on the first
// (non-redeclaration) lookup.
- if ((TUK == TUK_Reference || TUK == TUK_Friend) &&
+ if ((TUK == TagUseKind::Reference || TUK == TagUseKind::Friend) &&
!Previous.isForRedeclaration()) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
Diag(NameLoc, diag::err_tag_reference_non_tag)
@@ -17942,7 +17947,7 @@ Sema::ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
// do nothing
// Diagnose implicit declarations introduced by elaborated types.
- } else if (TUK == TUK_Reference || TUK == TUK_Friend) {
+ } else if (TUK == TagUseKind::Reference || TUK == TagUseKind::Friend) {
NonTagKind NTK = getNonTagTypeDeclKind(PrevDecl, Kind);
Diag(NameLoc, diag::err_tag_reference_conflict) << NTK;
Diag(PrevDecl->getLocation(), diag::note_previous_decl) << PrevDecl;
@@ -18001,7 +18006,7 @@ CreateNewDecl:
StdAlignValT = cast<EnumDecl>(New);
// If this is an undefined enum, warn.
- if (TUK != TUK_Definition && !Invalid) {
+ if (TUK != TagUseKind::Definition && !Invalid) {
TagDecl *Def;
if (IsFixed && cast<EnumDecl>(New)->isFixed()) {
// C++0x: 7.2p2: opaque-enum-declaration.
@@ -18051,21 +18056,22 @@ CreateNewDecl:
}
// Only C23 and later allow defining new types in 'offsetof()'.
- if (OOK != OOK_Outside && TUK == TUK_Definition && !getLangOpts().CPlusPlus &&
- !getLangOpts().C23)
+ if (OOK != OOK_Outside && TUK == TagUseKind::Definition &&
+ !getLangOpts().CPlusPlus && !getLangOpts().C23)
Diag(New->getLocation(), diag::ext_type_defined_in_offsetof)
<< (OOK == OOK_Macro) << New->getSourceRange();
// C++11 [dcl.type]p3:
// A type-specifier-seq shall not define a class or enumeration [...].
if (!Invalid && getLangOpts().CPlusPlus &&
- (IsTypeSpecifier || IsTemplateParamOrArg) && TUK == TUK_Definition) {
+ (IsTypeSpecifier || IsTemplateParamOrArg) &&
+ TUK == TagUseKind::Definition) {
Diag(New->getLocation(), diag::err_type_defined_in_type_specifier)
<< Context.getTagDeclType(New);
Invalid = true;
}
- if (!Invalid && getLangOpts().CPlusPlus && TUK == TUK_Definition &&
+ if (!Invalid && getLangOpts().CPlusPlus && TUK == TagUseKind::Definition &&
DC->getDeclKind() == Decl::Enum) {
Diag(New->getLocation(), diag::err_type_defined_in_enum)
<< Context.getTagDeclType(New);
@@ -18077,7 +18083,7 @@ CreateNewDecl:
if (SS.isSet()) {
// If this is either a declaration or a definition, check the
// nested-name-specifier against the current context.
- if ((TUK == TUK_Definition || TUK == TUK_Declaration) &&
+ if ((TUK == TagUseKind::Definition || TUK == TagUseKind::Declaration) &&
diagnoseQualifiedDeclaration(SS, DC, OrigName, Loc,
/*TemplateId=*/nullptr,
isMemberSpecialization))
@@ -18102,7 +18108,7 @@ CreateNewDecl:
// many points during the parsing of a struct declaration (because
// the #pragma tokens are effectively skipped over during the
// parsing of the struct).
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(RD);
AddMsStructLayoutForRecord(RD);
}
@@ -18133,7 +18139,7 @@ CreateNewDecl:
if (getLangOpts().CPlusPlus) {
// C++ [dcl.fct]p6:
// Types shall not be defined in return or parameter types.
- if (TUK == TUK_Definition && !IsTypeSpecifier) {
+ if (TUK == TagUseKind::Definition && !IsTypeSpecifier) {
Diag(Loc, diag::err_type_defined_in_param_type)
<< Name;
Invalid = true;
@@ -18154,7 +18160,7 @@ CreateNewDecl:
// In Microsoft mode, a friend declaration also acts as a forward
// declaration so we always pass true to setObjectOfFriendDecl to make
// the tag name visible.
- if (TUK == TUK_Friend)
+ if (TUK == TagUseKind::Friend)
New->setObjectOfFriendDecl(getLangOpts().MSVCCompat);
// Set the access specifier.
@@ -18164,14 +18170,14 @@ CreateNewDecl:
if (PrevDecl)
CheckRedeclarationInModule(New, PrevDecl);
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip))
New->startDefinition();
ProcessDeclAttributeList(S, New, Attrs);
AddPragmaAttributes(S, New);
// If this has an identifier, add it to the scope stack.
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
// We might be replacing an existing declaration in the lookup tables;
// if so, borrow its access specifier.
if (PrevDecl)
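Illustration (not the actual declaration site): the file-wide TUK_* to TagUseKind:: rename above supports a scoped enumeration along these lines, so the values no longer convert implicitly to int and must be spelled with the qualified form used throughout this diff.

    enum class TagUseKind { Reference, Declaration, Definition, Friend };

    static bool isUseOnly(TagUseKind TUK) {
      // Scoped enumerators require qualification, matching the rewritten checks.
      return TUK == TagUseKind::Reference || TUK == TagUseKind::Friend;
    }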
diff --git a/clang/lib/Sema/SemaDeclCXX.cpp b/clang/lib/Sema/SemaDeclCXX.cpp
index 822538198505..8ab429e2a136 100644
--- a/clang/lib/Sema/SemaDeclCXX.cpp
+++ b/clang/lib/Sema/SemaDeclCXX.cpp
@@ -2656,188 +2656,122 @@ bool Sema::isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS) {
return false;
}
-/// Determine whether the given class is a base class of the given
-/// class, including looking at dependent bases.
-static bool findCircularInheritance(const CXXRecordDecl *Class,
- const CXXRecordDecl *Current) {
- SmallVector<const CXXRecordDecl*, 8> Queue;
-
- Class = Class->getCanonicalDecl();
- while (true) {
- for (const auto &I : Current->bases()) {
- CXXRecordDecl *Base = I.getType()->getAsCXXRecordDecl();
- if (!Base)
- continue;
-
- Base = Base->getDefinition();
- if (!Base)
- continue;
-
- if (Base->getCanonicalDecl() == Class)
- return true;
-
- Queue.push_back(Base);
- }
-
- if (Queue.empty())
- return false;
-
- Current = Queue.pop_back_val();
- }
-
- return false;
-}
-
/// Check the validity of a C++ base class specifier.
///
/// \returns a new CXXBaseSpecifier if well-formed, emits diagnostics
/// and returns NULL otherwise.
-CXXBaseSpecifier *
-Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
- SourceRange SpecifierRange,
- bool Virtual, AccessSpecifier Access,
- TypeSourceInfo *TInfo,
- SourceLocation EllipsisLoc) {
- // In HLSL, unspecified class access is public rather than private.
- if (getLangOpts().HLSL && Class->getTagKind() == TagTypeKind::Class &&
- Access == AS_none)
- Access = AS_public;
-
+CXXBaseSpecifier *Sema::CheckBaseSpecifier(CXXRecordDecl *Class,
+ SourceRange SpecifierRange,
+ bool Virtual, AccessSpecifier Access,
+ TypeSourceInfo *TInfo,
+ SourceLocation EllipsisLoc) {
QualType BaseType = TInfo->getType();
+ SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
if (BaseType->containsErrors()) {
// Already emitted a diagnostic when parsing the error type.
return nullptr;
}
- // C++ [class.union]p1:
- // A union shall not have base classes.
- if (Class->isUnion()) {
- Diag(Class->getLocation(), diag::err_base_clause_on_union)
- << SpecifierRange;
- return nullptr;
- }
- if (EllipsisLoc.isValid() &&
- !TInfo->getType()->containsUnexpandedParameterPack()) {
+ if (EllipsisLoc.isValid() && !BaseType->containsUnexpandedParameterPack()) {
Diag(EllipsisLoc, diag::err_pack_expansion_without_parameter_packs)
<< TInfo->getTypeLoc().getSourceRange();
EllipsisLoc = SourceLocation();
}
- SourceLocation BaseLoc = TInfo->getTypeLoc().getBeginLoc();
-
- if (BaseType->isDependentType()) {
- // Make sure that we don't have circular inheritance among our dependent
- // bases. For non-dependent bases, the check for completeness below handles
- // this.
- if (CXXRecordDecl *BaseDecl = BaseType->getAsCXXRecordDecl()) {
- if (BaseDecl->getCanonicalDecl() == Class->getCanonicalDecl() ||
- ((BaseDecl = BaseDecl->getDefinition()) &&
- findCircularInheritance(Class, BaseDecl))) {
- Diag(BaseLoc, diag::err_circular_inheritance)
- << BaseType << Context.getTypeDeclType(Class);
-
- if (BaseDecl->getCanonicalDecl() != Class->getCanonicalDecl())
- Diag(BaseDecl->getLocation(), diag::note_previous_decl)
- << BaseType;
+ auto *BaseDecl =
+ dyn_cast_if_present<CXXRecordDecl>(computeDeclContext(BaseType));
+ // C++ [class.derived.general]p2:
+ // A class-or-decltype shall denote a (possibly cv-qualified) class type
+ // that is not an incompletely defined class; any cv-qualifiers are
+ // ignored.
+ if (BaseDecl) {
+ // C++ [class.union.general]p4:
+ // [...] A union shall not be used as a base class.
+ if (BaseDecl->isUnion()) {
+ Diag(BaseLoc, diag::err_union_as_base_class) << SpecifierRange;
+ return nullptr;
+ }
- return nullptr;
+ // For the MS ABI, propagate DLL attributes to base class templates.
+ if (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
+ Context.getTargetInfo().getTriple().isPS()) {
+ if (Attr *ClassAttr = getDLLAttr(Class)) {
+ if (auto *BaseSpec =
+ dyn_cast<ClassTemplateSpecializationDecl>(BaseDecl)) {
+ propagateDLLAttrToBaseClassTemplate(Class, ClassAttr, BaseSpec,
+ BaseLoc);
+ }
}
}
+ if (RequireCompleteType(BaseLoc, BaseType, diag::err_incomplete_base_class,
+ SpecifierRange)) {
+ Class->setInvalidDecl();
+ return nullptr;
+ }
+
+ BaseDecl = BaseDecl->getDefinition();
+ assert(BaseDecl && "Base type is not incomplete, but has no definition");
+
+ // Microsoft docs say:
+ // "If a base-class has a code_seg attribute, derived classes must have the
+ // same attribute."
+ const auto *BaseCSA = BaseDecl->getAttr<CodeSegAttr>();
+ const auto *DerivedCSA = Class->getAttr<CodeSegAttr>();
+ if ((DerivedCSA || BaseCSA) &&
+ (!BaseCSA || !DerivedCSA ||
+ BaseCSA->getName() != DerivedCSA->getName())) {
+ Diag(Class->getLocation(), diag::err_mismatched_code_seg_base);
+ Diag(BaseDecl->getLocation(), diag::note_base_class_specified_here)
+ << BaseDecl;
+ return nullptr;
+ }
+
+ // A class which contains a flexible array member is not suitable for use as
+ // a base class:
+ // - If the layout determines that a base comes before another base,
+ // the flexible array member would index into the subsequent base.
+ // - If the layout determines that base comes before the derived class,
+ // the flexible array member would index into the derived class.
+ if (BaseDecl->hasFlexibleArrayMember()) {
+ Diag(BaseLoc, diag::err_base_class_has_flexible_array_member)
+ << BaseDecl->getDeclName();
+ return nullptr;
+ }
+
+ // C++ [class]p3:
+ // If a class is marked final and it appears as a base-type-specifier in
+ // base-clause, the program is ill-formed.
+ if (FinalAttr *FA = BaseDecl->getAttr<FinalAttr>()) {
+ Diag(BaseLoc, diag::err_class_marked_final_used_as_base)
+ << BaseDecl->getDeclName() << FA->isSpelledAsSealed();
+ Diag(BaseDecl->getLocation(), diag::note_entity_declared_at)
+ << BaseDecl->getDeclName() << FA->getRange();
+ return nullptr;
+ }
+
+ // If the base class is invalid the derived class is as well.
+ if (BaseDecl->isInvalidDecl())
+ Class->setInvalidDecl();
+ } else if (BaseType->isDependentType()) {
// Make sure that we don't make an ill-formed AST where the type of the
// Class is non-dependent and its attached base class specifier is a
// dependent type, which violates invariants in many clang code paths (e.g.
// constexpr evaluator). If this case happens (in error-recovery mode), we

// explicitly mark the Class decl invalid. The diagnostic was already
// emitted.
- if (!Class->getTypeForDecl()->isDependentType())
+ if (!Class->isDependentContext())
Class->setInvalidDecl();
- return new (Context) CXXBaseSpecifier(
- SpecifierRange, Virtual, Class->getTagKind() == TagTypeKind::Class,
- Access, TInfo, EllipsisLoc);
- }
-
- // Base specifiers must be record types.
- if (!BaseType->isRecordType()) {
+ } else {
+ // The base class is some non-dependent non-class type.
Diag(BaseLoc, diag::err_base_must_be_class) << SpecifierRange;
return nullptr;
}
- // C++ [class.union]p1:
- // A union shall not be used as a base class.
- if (BaseType->isUnionType()) {
- Diag(BaseLoc, diag::err_union_as_base_class) << SpecifierRange;
- return nullptr;
- }
-
- // For the MS ABI, propagate DLL attributes to base class templates.
- if (Context.getTargetInfo().getCXXABI().isMicrosoft() ||
- Context.getTargetInfo().getTriple().isPS()) {
- if (Attr *ClassAttr = getDLLAttr(Class)) {
- if (auto *BaseTemplate = dyn_cast_or_null<ClassTemplateSpecializationDecl>(
- BaseType->getAsCXXRecordDecl())) {
- propagateDLLAttrToBaseClassTemplate(Class, ClassAttr, BaseTemplate,
- BaseLoc);
- }
- }
- }
-
- // C++ [class.derived]p2:
- // The class-name in a base-specifier shall not be an incompletely
- // defined class.
- if (RequireCompleteType(BaseLoc, BaseType,
- diag::err_incomplete_base_class, SpecifierRange)) {
- Class->setInvalidDecl();
- return nullptr;
- }
-
- // If the base class is polymorphic or isn't empty, the new one is/isn't, too.
- RecordDecl *BaseDecl = BaseType->castAs<RecordType>()->getDecl();
- assert(BaseDecl && "Record type has no declaration");
- BaseDecl = BaseDecl->getDefinition();
- assert(BaseDecl && "Base type is not incomplete, but has no definition");
- CXXRecordDecl *CXXBaseDecl = cast<CXXRecordDecl>(BaseDecl);
- assert(CXXBaseDecl && "Base type is not a C++ type");
-
- // Microsoft docs say:
- // "If a base-class has a code_seg attribute, derived classes must have the
- // same attribute."
- const auto *BaseCSA = CXXBaseDecl->getAttr<CodeSegAttr>();
- const auto *DerivedCSA = Class->getAttr<CodeSegAttr>();
- if ((DerivedCSA || BaseCSA) &&
- (!BaseCSA || !DerivedCSA || BaseCSA->getName() != DerivedCSA->getName())) {
- Diag(Class->getLocation(), diag::err_mismatched_code_seg_base);
- Diag(CXXBaseDecl->getLocation(), diag::note_base_class_specified_here)
- << CXXBaseDecl;
- return nullptr;
- }
-
- // A class which contains a flexible array member is not suitable for use as a
- // base class:
- // - If the layout determines that a base comes before another base,
- // the flexible array member would index into the subsequent base.
- // - If the layout determines that base comes before the derived class,
- // the flexible array member would index into the derived class.
- if (CXXBaseDecl->hasFlexibleArrayMember()) {
- Diag(BaseLoc, diag::err_base_class_has_flexible_array_member)
- << CXXBaseDecl->getDeclName();
- return nullptr;
- }
-
- // C++ [class]p3:
- // If a class is marked final and it appears as a base-type-specifier in
- // base-clause, the program is ill-formed.
- if (FinalAttr *FA = CXXBaseDecl->getAttr<FinalAttr>()) {
- Diag(BaseLoc, diag::err_class_marked_final_used_as_base)
- << CXXBaseDecl->getDeclName()
- << FA->isSpelledAsSealed();
- Diag(CXXBaseDecl->getLocation(), diag::note_entity_declared_at)
- << CXXBaseDecl->getDeclName() << FA->getRange();
- return nullptr;
- }
-
- if (BaseDecl->isInvalidDecl())
- Class->setInvalidDecl();
+ // In HLSL, unspecified class access is public rather than private.
+ if (getLangOpts().HLSL && Class->getTagKind() == TagTypeKind::Class &&
+ Access == AS_none)
+ Access = AS_public;
// Create the base specifier.
return new (Context) CXXBaseSpecifier(
@@ -2887,13 +2821,20 @@ BaseResult Sema::ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange,
UPPC_BaseType))
return true;
+ // C++ [class.union.general]p4:
+ // [...] A union shall not have base classes.
+ if (Class->isUnion()) {
+ Diag(Class->getLocation(), diag::err_base_clause_on_union)
+ << SpecifierRange;
+ return true;
+ }
+
if (CXXBaseSpecifier *BaseSpec = CheckBaseSpecifier(Class, SpecifierRange,
Virtual, Access, TInfo,
EllipsisLoc))
return BaseSpec;
- else
- Class->setInvalidDecl();
+ Class->setInvalidDecl();
return true;
}
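Illustration (mine, not from the patch's tests) of the base-specifier cases rejected by the restructured checks, including the union check that now fires up front in ActOnBaseSpecifier:

    union U { int i; };
    struct A : U {};                     // err_union_as_base_class
    union V : A {};                      // err_base_clause_on_union
    struct F final {};
    struct B : F {};                     // err_class_marked_final_used_as_base
    struct Flex { int n; int tail[]; };  // GNU flexible array member
    struct C : Flex {};                  // err_base_class_has_flexible_array_member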
@@ -17639,11 +17580,12 @@ DeclResult Sema::ActOnTemplatedFriendTag(
if (Invalid)
return true;
- return CheckClassTemplate(S, TagSpec, TUK_Friend, TagLoc, SS, Name,
- NameLoc, Attr, TemplateParams, AS_public,
+ return CheckClassTemplate(S, TagSpec, TagUseKind::Friend, TagLoc, SS,
+ Name, NameLoc, Attr, TemplateParams, AS_public,
/*ModulePrivateLoc=*/SourceLocation(),
FriendLoc, TempParamLists.size() - 1,
- TempParamLists.data()).get();
+ TempParamLists.data())
+ .get();
} else {
// The "template<>" header is extraneous.
Diag(TemplateParams->getTemplateLoc(), diag::err_template_tag_noparams)
@@ -17671,8 +17613,8 @@ DeclResult Sema::ActOnTemplatedFriendTag(
if (SS.isEmpty()) {
bool Owned = false;
bool IsDependent = false;
- return ActOnTag(S, TagSpec, TUK_Friend, TagLoc, SS, Name, NameLoc, Attr,
- AS_public,
+ return ActOnTag(S, TagSpec, TagUseKind::Friend, TagLoc, SS, Name, NameLoc,
+ Attr, AS_public,
/*ModulePrivateLoc=*/SourceLocation(),
MultiTemplateParamsArg(), Owned, IsDependent,
/*ScopedEnumKWLoc=*/SourceLocation(),
@@ -17787,7 +17729,7 @@ Decl *Sema::ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
// Try to convert the decl specifier to a type. This works for
// friend templates because ActOnTag never produces a ClassTemplateDecl
- // for a TUK_Friend.
+ // for a TagUseKind::Friend.
Declarator TheDeclarator(DS, ParsedAttributesView::none(),
DeclaratorContext::Member);
TypeSourceInfo *TSI = GetTypeForDeclarator(TheDeclarator);
diff --git a/clang/lib/Sema/SemaExpr.cpp b/clang/lib/Sema/SemaExpr.cpp
index 5ecfdee21f09..326879b0883f 100644
--- a/clang/lib/Sema/SemaExpr.cpp
+++ b/clang/lib/Sema/SemaExpr.cpp
@@ -54,6 +54,7 @@
#include "clang/Sema/SemaInternal.h"
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPseudoObject.h"
#include "clang/Sema/Template.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/STLForwardCompat.h"
@@ -2718,34 +2719,6 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
return ExprError();
}
- // C++ [temp.dep.expr]p3:
- // An id-expression is type-dependent if it contains:
- // -- an identifier that was declared with a dependent type,
- // (note: handled after lookup)
- // -- a template-id that is dependent,
- // (note: handled in BuildTemplateIdExpr)
- // -- a conversion-function-id that specifies a dependent type,
- // -- a nested-name-specifier that contains a class-name that
- // names a dependent type.
- // Determine whether this is a member of an unknown specialization;
- // we need to handle these differently.
- bool DependentID = false;
- if (Name.getNameKind() == DeclarationName::CXXConversionFunctionName &&
- Name.getCXXNameType()->isDependentType()) {
- DependentID = true;
- } else if (SS.isSet()) {
- if (DeclContext *DC = computeDeclContext(SS, false)) {
- if (RequireCompleteDeclContext(SS, DC))
- return ExprError();
- } else {
- DependentID = true;
- }
- }
-
- if (DependentID)
- return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
- IsAddressOfOperand, TemplateArgs);
-
// BoundsSafety: This specially handles arguments of bounds attributes
// appertains to a type of C struct field such that the name lookup
// within a struct finds the member name, which is not the case for other
@@ -2781,7 +2754,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
&AssumedTemplate))
return ExprError();
- if (R.wasNotFoundInCurrentInstantiation())
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
IsAddressOfOperand, TemplateArgs);
} else {
@@ -2791,7 +2764,7 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
// If the result might be in a dependent base class, this is a dependent
// id-expression.
- if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
return ActOnDependentIdExpression(SS, TemplateKWLoc, NameInfo,
IsAddressOfOperand, TemplateArgs);
@@ -2946,26 +2919,14 @@ Sema::ActOnIdExpression(Scope *S, CXXScopeSpec &SS,
/// this path.
ExprResult Sema::BuildQualifiedDeclarationNameExpr(
CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo,
- bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI) {
- if (NameInfo.getName().isDependentName())
- return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
- NameInfo, /*TemplateArgs=*/nullptr);
-
- DeclContext *DC = computeDeclContext(SS, false);
- if (!DC)
- return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
- NameInfo, /*TemplateArgs=*/nullptr);
-
- if (RequireCompleteDeclContext(SS, DC))
- return ExprError();
-
+ bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI) {
LookupResult R(*this, NameInfo, LookupOrdinaryName);
- LookupQualifiedName(R, DC);
+ LookupParsedName(R, /*S=*/nullptr, &SS, /*ObjectType=*/QualType());
if (R.isAmbiguous())
return ExprError();
- if (R.getResultKind() == LookupResult::NotFoundInCurrentInstantiation)
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
return BuildDependentDeclRefExpr(SS, /*TemplateKWLoc=*/SourceLocation(),
NameInfo, /*TemplateArgs=*/nullptr);
@@ -2974,6 +2935,7 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
// diagnostic during template instantiation is likely bogus, e.g. if a class
// is invalid because it's derived from an invalid base class, then missing
// members were likely supposed to be inherited.
+ DeclContext *DC = computeDeclContext(SS);
if (const auto *CD = dyn_cast<CXXRecordDecl>(DC))
if (CD->isInvalidDecl())
return ExprError();
@@ -3017,16 +2979,14 @@ ExprResult Sema::BuildQualifiedDeclarationNameExpr(
return ExprEmpty();
}
- // Defend against this resolving to an implicit member access. We usually
- // won't get here if this might be a legitimate a class member (we end up in
- // BuildMemberReferenceExpr instead), but this can be valid if we're forming
- // a pointer-to-member or in an unevaluated context in C++11.
- if (!R.empty() && (*R.begin())->isCXXClassMember() && !IsAddressOfOperand)
+ // If necessary, build an implicit class member access.
+ if (isPotentialImplicitMemberAccess(SS, R, IsAddressOfOperand))
return BuildPossibleImplicitMemberExpr(SS,
/*TemplateKWLoc=*/SourceLocation(),
- R, /*TemplateArgs=*/nullptr, S);
+ R, /*TemplateArgs=*/nullptr,
+ /*S=*/nullptr);
- return BuildDeclarationNameExpr(SS, R, /* ADL */ false);
+ return BuildDeclarationNameExpr(SS, R, /*ADL=*/false);
}
/// Cast a base object to a member's actual type.
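Illustration (mine, simplified): a qualified name that takes the rewritten dependent path. Lookup now goes through LookupParsedName, and a name not found in the current instantiation is rebuilt as a DependentScopeDeclRefExpr instead of failing early.

    template <typename T> struct Base { int m; };
    template <typename T> struct Derived : Base<T> {
      int f() { return Derived::m; }  // 'm' is not found in 'Derived' itself, so
                                      // the qualified name is treated as dependent
    };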
@@ -3190,7 +3150,7 @@ bool Sema::UseArgumentDependentLookup(const CXXScopeSpec &SS,
return false;
// Never if a scope specifier was provided.
- if (SS.isSet())
+ if (SS.isNotEmpty())
return false;
// Only in C++ or ObjC++.
@@ -3718,9 +3678,7 @@ static Expr *BuildFloatingLiteral(Sema &S, NumericLiteralParser &Literal,
APFloat::getSmallest(Format).toString(buffer);
}
- S.Diag(Loc, diagnostic)
- << Ty
- << StringRef(buffer.data(), buffer.size());
+ S.Diag(Loc, diagnostic) << Ty << buffer.str();
}
bool isExact = (result == APFloat::opOK);
@@ -5283,36 +5241,22 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
<< ResultType << BaseExpr->getSourceRange();
return ExprError();
}
- } else if (const VectorType *VTy = LHSTy->getAs<VectorType>()) {
- BaseExpr = LHSExp; // vectors: V[123]
- IndexExpr = RHSExp;
- // We apply C++ DR1213 to vector subscripting too.
- if (getLangOpts().CPlusPlus11 && LHSExp->isPRValue()) {
- ExprResult Materialized = TemporaryMaterializationConversion(LHSExp);
- if (Materialized.isInvalid())
- return ExprError();
- LHSExp = Materialized.get();
+ } else if (LHSTy->isSubscriptableVectorType()) {
+ if (LHSTy->isBuiltinType() &&
+ LHSTy->getAs<BuiltinType>()->isSveVLSBuiltinType()) {
+ const BuiltinType *BTy = LHSTy->getAs<BuiltinType>();
+ if (BTy->isSVEBool())
+ return ExprError(Diag(LLoc, diag::err_subscript_svbool_t)
+ << LHSExp->getSourceRange()
+ << RHSExp->getSourceRange());
+ ResultType = BTy->getSveEltType(Context);
+ } else {
+ const VectorType *VTy = LHSTy->getAs<VectorType>();
+ ResultType = VTy->getElementType();
}
- VK = LHSExp->getValueKind();
- if (VK != VK_PRValue)
- OK = OK_VectorComponent;
-
- ResultType = VTy->getElementType();
- QualType BaseType = BaseExpr->getType();
- Qualifiers BaseQuals = BaseType.getQualifiers();
- Qualifiers MemberQuals = ResultType.getQualifiers();
- Qualifiers Combined = BaseQuals + MemberQuals;
- if (Combined != MemberQuals)
- ResultType = Context.getQualifiedType(ResultType, Combined);
- } else if (LHSTy->isBuiltinType() &&
- LHSTy->getAs<BuiltinType>()->isSveVLSBuiltinType()) {
- const BuiltinType *BTy = LHSTy->getAs<BuiltinType>();
- if (BTy->isSVEBool())
- return ExprError(Diag(LLoc, diag::err_subscript_svbool_t)
- << LHSExp->getSourceRange() << RHSExp->getSourceRange());
-
- BaseExpr = LHSExp;
+ BaseExpr = LHSExp; // vectors: V[123]
IndexExpr = RHSExp;
+ // We apply C++ DR1213 to vector subscripting too.
if (getLangOpts().CPlusPlus11 && LHSExp->isPRValue()) {
ExprResult Materialized = TemporaryMaterializationConversion(LHSExp);
if (Materialized.isInvalid())
@@ -5323,8 +5267,6 @@ Sema::CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
if (VK != VK_PRValue)
OK = OK_VectorComponent;
- ResultType = BTy->getSveEltType(Context);
-
QualType BaseType = BaseExpr->getType();
Qualifiers BaseQuals = BaseType.getQualifiers();
Qualifiers MemberQuals = ResultType.getQualifiers();
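Illustration (mine): the consolidation above routes both GCC-style vectors and SVE VLS builtin vectors through one subscript branch.

    typedef float float4 __attribute__((vector_size(16)));

    float pick(float4 v) {
      return v[2];  // element type comes from the shared subscriptable-vector path
    }
    // An 'svbool_t' operand would still be rejected with err_subscript_svbool_t;
    // other SVE VLS vectors get their element type via getSveEltType().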
@@ -15282,7 +15224,7 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
LHSExpr = LHS.get();
RHSExpr = RHS.get();
- // We want to end up calling one of checkPseudoObjectAssignment
+ // We want to end up calling one of SemaPseudoObject::checkAssignment
// (if the LHS is a pseudo-object), BuildOverloadedBinOp (if
// both expressions are overloadable or either is type-dependent),
// or CreateBuiltinBinOp (in any other case). We also want to get
@@ -15293,7 +15235,7 @@ ExprResult Sema::BuildBinOp(Scope *S, SourceLocation OpLoc,
// Assignments with a pseudo-object l-value need special analysis.
if (pty->getKind() == BuiltinType::PseudoObject &&
BinaryOperator::isAssignmentOp(Opc))
- return checkPseudoObjectAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr);
+ return PseudoObject().checkAssignment(S, OpLoc, Opc, LHSExpr, RHSExpr);
// Don't resolve overloads if the other type is overloadable.
if (getLangOpts().CPlusPlus && pty->getKind() == BuiltinType::Overload) {
@@ -15716,7 +15658,7 @@ ExprResult Sema::BuildUnaryOp(Scope *S, SourceLocation OpLoc,
// Increment and decrement of pseudo-object references.
if (pty->getKind() == BuiltinType::PseudoObject &&
UnaryOperator::isIncrementDecrementOp(Opc))
- return checkPseudoObjectIncDec(S, OpLoc, Opc, Input);
+ return PseudoObject().checkIncDec(S, OpLoc, Opc, Input);
// extension is always a builtin operator.
if (Opc == UO_Extension)
@@ -20933,7 +20875,7 @@ ExprResult Sema::CheckPlaceholderExpr(Expr *E) {
// Pseudo-objects.
case BuiltinType::PseudoObject:
- return checkPseudoObjectRValue(E);
+ return PseudoObject().checkRValue(E);
case BuiltinType::BuiltinFn: {
// Accept __noop without parens by implicitly converting it to a call expr.
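Illustration (mine, requires -fms-extensions): a pseudo-object expression whose handling now goes through the new SemaPseudoObject part via the PseudoObject() accessor shown above, rather than Sema member functions.

    struct Window {
      int getWidth() const;
      void setWidth(int);
      __declspec(property(get = getWidth, put = setWidth)) int Width;
    };
    void resize(Window &w) {
      w.Width = 640;  // MSPropertyRefExpr on the LHS -> checkAssignment
      ++w.Width;      // pseudo-object increment       -> checkIncDec
    }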
diff --git a/clang/lib/Sema/SemaLookup.cpp b/clang/lib/Sema/SemaLookup.cpp
index 0834db95d42a..e4d4cd7395eb 100644
--- a/clang/lib/Sema/SemaLookup.cpp
+++ b/clang/lib/Sema/SemaLookup.cpp
@@ -2771,9 +2771,6 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
ObjectType->castAs<TagType>()->isBeingDefined()) &&
"Caller should have completed object type");
} else if (SS && SS->isNotEmpty()) {
- if (NestedNameSpecifier *NNS = SS->getScopeRep();
- NNS->getKind() == NestedNameSpecifier::Super)
- return LookupInSuper(R, NNS->getAsRecordDecl());
// This nested-name-specifier occurs after another nested-name-specifier,
// so look into the context associated with the prior nested-name-specifier.

if ((DC = computeDeclContext(*SS, EnteringContext))) {
@@ -2781,6 +2778,12 @@ bool Sema::LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
if (!DC->isDependentContext() && RequireCompleteDeclContext(*SS, DC))
return false;
R.setContextRange(SS->getRange());
+ // FIXME: '__super' lookup semantics could be implemented by a
+ // LookupResult::isSuperLookup flag which skips the initial search of
+ // the lookup context in LookupQualified.
+ if (NestedNameSpecifier *NNS = SS->getScopeRep();
+ NNS->getKind() == NestedNameSpecifier::Super)
+ return LookupInSuper(R, NNS->getAsRecordDecl());
}
IsDependent = !DC && isDependentScopeSpecifier(*SS);
} else {
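Illustration (mine, requires -fms-extensions): the kind of input that takes the relocated '__super' branch, which now runs only after the prior specifier's declaration context has been computed and completed.

    struct Base {
      static void ping();
    };
    struct Derived : Base {
      static void ping() {
        __super::ping();  // '__super' nested-name-specifier -> LookupInSuper
      }
    };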
diff --git a/clang/lib/Sema/SemaOpenACC.cpp b/clang/lib/Sema/SemaOpenACC.cpp
index f174b2fa63c6..09d91b31cfe5 100644
--- a/clang/lib/Sema/SemaOpenACC.cpp
+++ b/clang/lib/Sema/SemaOpenACC.cpp
@@ -233,6 +233,19 @@ bool doesClauseApplyToDirective(OpenACCDirectiveKind DirectiveKind,
return false;
}
+ case OpenACCClauseKind::Reduction:
+ switch (DirectiveKind) {
+ case OpenACCDirectiveKind::Parallel:
+ case OpenACCDirectiveKind::Serial:
+ case OpenACCDirectiveKind::Loop:
+ case OpenACCDirectiveKind::ParallelLoop:
+ case OpenACCDirectiveKind::SerialLoop:
+ case OpenACCDirectiveKind::KernelsLoop:
+ return true;
+ default:
+ return false;
+ }
+
default:
// Do nothing so we can go to the 'unimplemented' diagnostic instead.
return true;
@@ -281,7 +294,6 @@ bool checkValidAfterDeviceType(
return true;
}
}
-
} // namespace
SemaOpenACC::SemaOpenACC(Sema &S) : SemaBase(S) {}
@@ -426,6 +438,22 @@ SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses,
<< /*NoArgs=*/1 << Clause.getDirectiveKind() << MaxArgs
<< Clause.getIntExprs().size();
+ // OpenACC 3.3 Section 2.5.4:
+ // A reduction clause may not appear on a parallel construct with a
+ // num_gangs clause that has more than one argument.
+ if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel &&
+ Clause.getIntExprs().size() > 1) {
+ auto *Parallel =
+ llvm::find_if(ExistingClauses, llvm::IsaPred<OpenACCReductionClause>);
+
+ if (Parallel != ExistingClauses.end()) {
+ Diag(Clause.getBeginLoc(), diag::err_acc_reduction_num_gangs_conflict)
+ << Clause.getIntExprs().size();
+ Diag((*Parallel)->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
+ }
+ }
+
// Create the AST node for the clause even if the number of expressions is
// incorrect.
return OpenACCNumGangsClause::Create(
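Illustration (mine, not from the patch's tests) of the new OpenACC 3.3 2.5.4 restriction, which is checked in both directions: here when num_gangs sees an existing reduction clause, and below when reduction sees an existing num_gangs clause.

    void f(float *a, int n) {
      float sum = 0.0f;
      // error (err_acc_reduction_num_gangs_conflict): reduction may not appear on
      // a parallel construct whose num_gangs clause has more than one argument,
      // regardless of which clause comes first.
      #pragma acc parallel num_gangs(4, 2) reduction(+ : sum)
      for (int i = 0; i < n; ++i)
        sum += a[i];
    }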
@@ -706,6 +734,46 @@ SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses,
Clause.getLParenLoc(), Clause.getDeviceTypeArchitectures(),
Clause.getEndLoc());
}
+ case OpenACCClauseKind::Reduction: {
+ // Restrictions are only properly implemented on 'compute' constructs, and
+ // 'compute' constructs are the only ones that can do anything with
+ // this yet, so skip/treat as unimplemented in this case.
+ if (!isOpenACCComputeDirectiveKind(Clause.getDirectiveKind()))
+ break;
+
+ // OpenACC 3.3 Section 2.5.4:
+ // A reduction clause may not appear on a parallel construct with a
+ // num_gangs clause that has more than one argument.
+ if (Clause.getDirectiveKind() == OpenACCDirectiveKind::Parallel) {
+ auto NumGangsClauses = llvm::make_filter_range(
+ ExistingClauses, llvm::IsaPred<OpenACCNumGangsClause>);
+
+ for (auto *NGC : NumGangsClauses) {
+ unsigned NumExprs =
+ cast<OpenACCNumGangsClause>(NGC)->getIntExprs().size();
+
+ if (NumExprs > 1) {
+ Diag(Clause.getBeginLoc(), diag::err_acc_reduction_num_gangs_conflict)
+ << NumExprs;
+ Diag(NGC->getBeginLoc(), diag::note_acc_previous_clause_here);
+ return nullptr;
+ }
+ }
+ }
+
+ SmallVector<Expr *> ValidVars;
+
+ for (Expr *Var : Clause.getVarList()) {
+ ExprResult Res = CheckReductionVar(Var);
+
+ if (Res.isUsable())
+ ValidVars.push_back(Res.get());
+ }
+
+ return OpenACCReductionClause::Create(
+ getASTContext(), Clause.getBeginLoc(), Clause.getLParenLoc(),
+ Clause.getReductionOp(), ValidVars, Clause.getEndLoc());
+ }
default:
break;
}
@@ -715,6 +783,66 @@ SemaOpenACC::ActOnClause(ArrayRef<const OpenACCClause *> ExistingClauses,
return nullptr;
}
+/// OpenACC 3.3 section 2.5.15:
+/// At a minimum, the supported data types include ... the numerical data types
+/// in C, C++, and Fortran.
+///
+/// If the reduction var is a composite variable, each
+/// member of the composite variable must be a supported datatype for the
+/// reduction operation.
+ExprResult SemaOpenACC::CheckReductionVar(Expr *VarExpr) {
+ VarExpr = VarExpr->IgnoreParenCasts();
+
+ auto TypeIsValid = [](QualType Ty) {
+ return Ty->isDependentType() || Ty->isScalarType();
+ };
+
+ if (isa<ArraySectionExpr>(VarExpr)) {
+ Expr *ASExpr = VarExpr;
+ QualType BaseTy = ArraySectionExpr::getBaseOriginalType(ASExpr);
+ QualType EltTy = getASTContext().getBaseElementType(BaseTy);
+
+ if (!TypeIsValid(EltTy)) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_type)
+ << EltTy << /*Sub array base type*/ 1;
+ return ExprError();
+ }
+ } else if (auto *RD = VarExpr->getType()->getAsRecordDecl()) {
+ if (!RD->isStruct() && !RD->isClass()) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
+ << /*not class or struct*/ 0 << VarExpr->getType();
+ return ExprError();
+ }
+
+ if (!RD->isCompleteDefinition()) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
+ << /*incomplete*/ 1 << VarExpr->getType();
+ return ExprError();
+ }
+ if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD);
+ CXXRD && !CXXRD->isAggregate()) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_composite_type)
+ << /*aggregate*/ 2 << VarExpr->getType();
+ return ExprError();
+ }
+
+ for (FieldDecl *FD : RD->fields()) {
+ if (!TypeIsValid(FD->getType())) {
+ Diag(VarExpr->getExprLoc(),
+ diag::err_acc_reduction_composite_member_type);
+ Diag(FD->getLocation(), diag::note_acc_reduction_composite_member_loc);
+ return ExprError();
+ }
+ }
+ } else if (!TypeIsValid(VarExpr->getType())) {
+ Diag(VarExpr->getExprLoc(), diag::err_acc_reduction_type)
+ << VarExpr->getType() << /*Sub array base type*/ 0;
+ return ExprError();
+ }
+
+ return VarExpr;
+}
+
void SemaOpenACC::ActOnConstruct(OpenACCDirectiveKind K,
SourceLocation StartLoc) {
switch (K) {
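Illustration (mine) of what CheckReductionVar accepts and rejects: every member of a composite reduction variable must itself be a scalar (or dependent) type.

    struct Good { int a; float b; };        // aggregate of scalars
    struct Inner { int x; };
    struct Bad  { int a; Inner nested; };   // has a non-scalar member
    void f(Good g, Bad b, float x) {
      #pragma acc parallel reduction(+ : x)   // scalar: accepted
      { x += 1.0f; }
      #pragma acc parallel reduction(+ : g)   // composite of scalars: accepted
      { g.a += 1; }
      #pragma acc parallel reduction(+ : b)   // err_acc_reduction_composite_member_type
      { b.a += 1; }
    }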
@@ -864,9 +992,7 @@ bool SemaOpenACC::CheckVarIsPointerType(OpenACCClauseKind ClauseKind,
return false;
}
-ExprResult SemaOpenACC::ActOnVar(Expr *VarExpr) {
- // We still need to retain the array subscript/subarray exprs, so work on a
- // copy.
+ExprResult SemaOpenACC::ActOnVar(OpenACCClauseKind CK, Expr *VarExpr) {
Expr *CurVarExpr = VarExpr->IgnoreParenImpCasts();
// Sub-arrays/subscript-exprs are fine as long as the base is a
@@ -882,14 +1008,19 @@ ExprResult SemaOpenACC::ActOnVar(Expr *VarExpr) {
// References to a VarDecl are fine.
if (const auto *DRE = dyn_cast<DeclRefExpr>(CurVarExpr)) {
if (isa<VarDecl, NonTypeTemplateParmDecl>(
- DRE->getDecl()->getCanonicalDecl()))
+ DRE->getFoundDecl()->getCanonicalDecl()))
return VarExpr;
}
+ // If CK is a Reduction, this is the special case from OpenACC 3.3 2.5.15: "A
+ // var in a reduction clause must be a scalar variable name, an aggregate
+ // variable name, an array element, or a subarray."
// A MemberExpr that references a Field is valid.
- if (const auto *ME = dyn_cast<MemberExpr>(CurVarExpr)) {
- if (isa<FieldDecl>(ME->getMemberDecl()->getCanonicalDecl()))
- return VarExpr;
+ if (CK != OpenACCClauseKind::Reduction) {
+ if (const auto *ME = dyn_cast<MemberExpr>(CurVarExpr)) {
+ if (isa<FieldDecl>(ME->getMemberDecl()->getCanonicalDecl()))
+ return VarExpr;
+ }
}
// Referring to 'this' is always OK.
@@ -898,7 +1029,9 @@ ExprResult SemaOpenACC::ActOnVar(Expr *VarExpr) {
// Nothing really we can do here, as these are dependent, so just return that
// they are valid.
- if (isa<DependentScopeDeclRefExpr, CXXDependentScopeMemberExpr>(CurVarExpr))
+ if (isa<DependentScopeDeclRefExpr>(CurVarExpr) ||
+ (CK != OpenACCClauseKind::Reduction &&
+ isa<CXXDependentScopeMemberExpr>(CurVarExpr)))
return VarExpr;
// There isn't really anything we can do in the case of a recovery expr, so
@@ -906,7 +1039,8 @@ ExprResult SemaOpenACC::ActOnVar(Expr *VarExpr) {
if (isa<RecoveryExpr>(CurVarExpr))
return ExprError();
- Diag(VarExpr->getExprLoc(), diag::err_acc_not_a_var_ref);
+ Diag(VarExpr->getExprLoc(), diag::err_acc_not_a_var_ref)
+ << (CK != OpenACCClauseKind::Reduction);
return ExprError();
}
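Illustration (mine; 'copy' stands in for any non-reduction var-list clause): the ActOnVar tightening means a member access is no longer accepted as a reduction var, matching the 2.5.15 wording quoted above, while other clauses keep accepting it.

    struct Pair { int first, second; };
    void f(Pair p) {
      #pragma acc parallel copy(p.first)           // member access still a valid var
      { p.first += 1; }
      #pragma acc parallel reduction(+ : p.first)  // now: err_acc_not_a_var_ref
      { p.first += 1; }
    }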
diff --git a/clang/lib/Sema/SemaPseudoObject.cpp b/clang/lib/Sema/SemaPseudoObject.cpp
index 14ed9590afc6..fdb584ceb810 100644
--- a/clang/lib/Sema/SemaPseudoObject.cpp
+++ b/clang/lib/Sema/SemaPseudoObject.cpp
@@ -29,6 +29,7 @@
//
//===----------------------------------------------------------------------===//
+#include "clang/Sema/SemaPseudoObject.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/Basic/CharInfo.h"
@@ -1446,24 +1447,24 @@ ExprResult MSPropertyOpBuilder::buildSet(Expr *op, SourceLocation sl,
// General Sema routines.
//===----------------------------------------------------------------------===//
-ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
+ExprResult SemaPseudoObject::checkRValue(Expr *E) {
Expr *opaqueRef = E->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr, true);
+ ObjCPropertyOpBuilder builder(SemaRef, refExpr, true);
return builder.buildRValueOperation(E);
}
else if (ObjCSubscriptRefExpr *refExpr
= dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
- ObjCSubscriptOpBuilder builder(*this, refExpr, true);
+ ObjCSubscriptOpBuilder builder(SemaRef, refExpr, true);
return builder.buildRValueOperation(E);
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr, true);
+ MSPropertyOpBuilder builder(SemaRef, refExpr, true);
return builder.buildRValueOperation(E);
} else if (MSPropertySubscriptExpr *RefExpr =
dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr, true);
+ MSPropertyOpBuilder Builder(SemaRef, RefExpr, true);
return Builder.buildRValueOperation(E);
} else {
llvm_unreachable("unknown pseudo-object kind!");
@@ -1471,48 +1472,48 @@ ExprResult Sema::checkPseudoObjectRValue(Expr *E) {
}
/// Check an increment or decrement of a pseudo-object expression.
-ExprResult Sema::checkPseudoObjectIncDec(Scope *Sc, SourceLocation opcLoc,
+ExprResult SemaPseudoObject::checkIncDec(Scope *Sc, SourceLocation opcLoc,
UnaryOperatorKind opcode, Expr *op) {
// Do nothing if the operand is dependent.
if (op->isTypeDependent())
- return UnaryOperator::Create(Context, op, opcode, Context.DependentTy,
- VK_PRValue, OK_Ordinary, opcLoc, false,
- CurFPFeatureOverrides());
+ return UnaryOperator::Create(
+ SemaRef.Context, op, opcode, SemaRef.Context.DependentTy, VK_PRValue,
+ OK_Ordinary, opcLoc, false, SemaRef.CurFPFeatureOverrides());
assert(UnaryOperator::isIncrementDecrementOp(opcode));
Expr *opaqueRef = op->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr, false);
+ ObjCPropertyOpBuilder builder(SemaRef, refExpr, false);
return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else if (isa<ObjCSubscriptRefExpr>(opaqueRef)) {
Diag(opcLoc, diag::err_illegal_container_subscripting_op);
return ExprError();
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr, false);
+ MSPropertyOpBuilder builder(SemaRef, refExpr, false);
return builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else if (MSPropertySubscriptExpr *RefExpr
= dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr, false);
+ MSPropertyOpBuilder Builder(SemaRef, RefExpr, false);
return Builder.buildIncDecOperation(Sc, opcLoc, opcode, op);
} else {
llvm_unreachable("unknown pseudo-object kind!");
}
}
-ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
+ExprResult SemaPseudoObject::checkAssignment(Scope *S, SourceLocation opcLoc,
BinaryOperatorKind opcode,
Expr *LHS, Expr *RHS) {
// Do nothing if either argument is dependent.
if (LHS->isTypeDependent() || RHS->isTypeDependent())
- return BinaryOperator::Create(Context, LHS, RHS, opcode,
- Context.DependentTy, VK_PRValue, OK_Ordinary,
- opcLoc, CurFPFeatureOverrides());
+ return BinaryOperator::Create(
+ SemaRef.Context, LHS, RHS, opcode, SemaRef.Context.DependentTy,
+ VK_PRValue, OK_Ordinary, opcLoc, SemaRef.CurFPFeatureOverrides());
// Filter out non-overload placeholder types in the RHS.
if (RHS->getType()->isNonOverloadPlaceholderType()) {
- ExprResult result = CheckPlaceholderExpr(RHS);
+ ExprResult result = SemaRef.CheckPlaceholderExpr(RHS);
if (result.isInvalid()) return ExprError();
RHS = result.get();
}
@@ -1521,20 +1522,20 @@ ExprResult Sema::checkPseudoObjectAssignment(Scope *S, SourceLocation opcLoc,
Expr *opaqueRef = LHS->IgnoreParens();
if (ObjCPropertyRefExpr *refExpr
= dyn_cast<ObjCPropertyRefExpr>(opaqueRef)) {
- ObjCPropertyOpBuilder builder(*this, refExpr, IsSimpleAssign);
+ ObjCPropertyOpBuilder builder(SemaRef, refExpr, IsSimpleAssign);
return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (ObjCSubscriptRefExpr *refExpr
= dyn_cast<ObjCSubscriptRefExpr>(opaqueRef)) {
- ObjCSubscriptOpBuilder builder(*this, refExpr, IsSimpleAssign);
+ ObjCSubscriptOpBuilder builder(SemaRef, refExpr, IsSimpleAssign);
return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (MSPropertyRefExpr *refExpr
= dyn_cast<MSPropertyRefExpr>(opaqueRef)) {
- MSPropertyOpBuilder builder(*this, refExpr, IsSimpleAssign);
- return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ MSPropertyOpBuilder builder(SemaRef, refExpr, IsSimpleAssign);
+ return builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else if (MSPropertySubscriptExpr *RefExpr
= dyn_cast<MSPropertySubscriptExpr>(opaqueRef)) {
- MSPropertyOpBuilder Builder(*this, RefExpr, IsSimpleAssign);
- return Builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
+ MSPropertyOpBuilder Builder(SemaRef, RefExpr, IsSimpleAssign);
+ return Builder.buildAssignmentOperation(S, opcLoc, opcode, LHS, RHS);
} else {
llvm_unreachable("unknown pseudo-object kind!");
}
@@ -1557,36 +1558,38 @@ static Expr *stripOpaqueValuesFromPseudoObjectRef(Sema &S, Expr *E) {
/// This is a hack which should be removed when TreeTransform is
/// capable of rebuilding a tree without stripping implicit
/// operations.
-Expr *Sema::recreateSyntacticForm(PseudoObjectExpr *E) {
+Expr *SemaPseudoObject::recreateSyntacticForm(PseudoObjectExpr *E) {
Expr *syntax = E->getSyntacticForm();
if (UnaryOperator *uop = dyn_cast<UnaryOperator>(syntax)) {
- Expr *op = stripOpaqueValuesFromPseudoObjectRef(*this, uop->getSubExpr());
- return UnaryOperator::Create(Context, op, uop->getOpcode(), uop->getType(),
- uop->getValueKind(), uop->getObjectKind(),
- uop->getOperatorLoc(), uop->canOverflow(),
- CurFPFeatureOverrides());
+ Expr *op = stripOpaqueValuesFromPseudoObjectRef(SemaRef, uop->getSubExpr());
+ return UnaryOperator::Create(
+ SemaRef.Context, op, uop->getOpcode(), uop->getType(),
+ uop->getValueKind(), uop->getObjectKind(), uop->getOperatorLoc(),
+ uop->canOverflow(), SemaRef.CurFPFeatureOverrides());
} else if (CompoundAssignOperator *cop
= dyn_cast<CompoundAssignOperator>(syntax)) {
- Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, cop->getLHS());
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(SemaRef, cop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(cop->getRHS())->getSourceExpr();
return CompoundAssignOperator::Create(
- Context, lhs, rhs, cop->getOpcode(), cop->getType(),
+ SemaRef.Context, lhs, rhs, cop->getOpcode(), cop->getType(),
cop->getValueKind(), cop->getObjectKind(), cop->getOperatorLoc(),
- CurFPFeatureOverrides(), cop->getComputationLHSType(),
+ SemaRef.CurFPFeatureOverrides(), cop->getComputationLHSType(),
cop->getComputationResultType());
} else if (BinaryOperator *bop = dyn_cast<BinaryOperator>(syntax)) {
- Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(*this, bop->getLHS());
+ Expr *lhs = stripOpaqueValuesFromPseudoObjectRef(SemaRef, bop->getLHS());
Expr *rhs = cast<OpaqueValueExpr>(bop->getRHS())->getSourceExpr();
- return BinaryOperator::Create(Context, lhs, rhs, bop->getOpcode(),
+ return BinaryOperator::Create(SemaRef.Context, lhs, rhs, bop->getOpcode(),
bop->getType(), bop->getValueKind(),
bop->getObjectKind(), bop->getOperatorLoc(),
- CurFPFeatureOverrides());
+ SemaRef.CurFPFeatureOverrides());
} else if (isa<CallExpr>(syntax)) {
return syntax;
} else {
assert(syntax->hasPlaceholderType(BuiltinType::PseudoObject));
- return stripOpaqueValuesFromPseudoObjectRef(*this, syntax);
+ return stripOpaqueValuesFromPseudoObjectRef(SemaRef, syntax);
}
}
+
+SemaPseudoObject::SemaPseudoObject(Sema &S) : SemaBase(S) {}
diff --git a/clang/lib/Sema/SemaRISCVVectorLookup.cpp b/clang/lib/Sema/SemaRISCV.cpp
index 26e13e87b1d6..26e13e87b1d6 100644
--- a/clang/lib/Sema/SemaRISCVVectorLookup.cpp
+++ b/clang/lib/Sema/SemaRISCV.cpp
diff --git a/clang/lib/Sema/SemaTemplate.cpp b/clang/lib/Sema/SemaTemplate.cpp
index 4937cce4621f..f930bd8651d5 100644
--- a/clang/lib/Sema/SemaTemplate.cpp
+++ b/clang/lib/Sema/SemaTemplate.cpp
@@ -726,44 +726,22 @@ Sema::ActOnDependentIdExpression(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs) {
- DeclContext *DC = getFunctionLevelDeclContext();
-
- // C++11 [expr.prim.general]p12:
- // An id-expression that denotes a non-static data member or non-static
- // member function of a class can only be used:
- // (...)
- // - if that id-expression denotes a non-static data member and it
- // appears in an unevaluated operand.
- //
- // If this might be the case, form a DependentScopeDeclRefExpr instead of a
- // CXXDependentScopeMemberExpr. The former can instantiate to either
- // DeclRefExpr or MemberExpr depending on lookup results, while the latter is
- // always a MemberExpr.
- bool MightBeCxx11UnevalField =
- getLangOpts().CPlusPlus11 && isUnevaluatedContext();
-
- // Check if the nested name specifier is an enum type.
- bool IsEnum = false;
- if (NestedNameSpecifier *NNS = SS.getScopeRep())
- IsEnum = isa_and_nonnull<EnumType>(NNS->getAsType());
-
- if (!MightBeCxx11UnevalField && !isAddressOfOperand && !IsEnum &&
- isa<CXXMethodDecl>(DC) &&
- cast<CXXMethodDecl>(DC)->isImplicitObjectMemberFunction()) {
- QualType ThisType =
- cast<CXXMethodDecl>(DC)->getThisType().getNonReferenceType();
-
- // Since the 'this' expression is synthesized, we don't need to
- // perform the double-lookup check.
- NamedDecl *FirstQualifierInScope = nullptr;
+ if (SS.isEmpty()) {
+ // FIXME: This codepath is only used by dependent unqualified names
+ // (e.g. a dependent conversion-function-id, or operator= once we support
+ // it). It doesn't quite do the right thing, and it will silently fail if
+ // getCurrentThisType() returns null.
+ QualType ThisType = getCurrentThisType();
+ if (ThisType.isNull())
+ return ExprError();
return CXXDependentScopeMemberExpr::Create(
- Context, /*This=*/nullptr, ThisType,
+ Context, /*Base=*/nullptr, ThisType,
/*IsArrow=*/!Context.getLangOpts().HLSL,
- /*Op=*/SourceLocation(), SS.getWithLocInContext(Context), TemplateKWLoc,
- FirstQualifierInScope, NameInfo, TemplateArgs);
+ /*OperatorLoc=*/SourceLocation(),
+ /*QualifierLoc=*/NestedNameSpecifierLoc(), TemplateKWLoc,
+ /*FirstQualifierFoundInScope=*/nullptr, NameInfo, TemplateArgs);
}
-
return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
}
@@ -772,13 +750,15 @@ Sema::BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs) {
- // DependentScopeDeclRefExpr::Create requires a valid QualifierLoc
- NestedNameSpecifierLoc QualifierLoc = SS.getWithLocInContext(Context);
- if (!QualifierLoc)
- return ExprError();
+ // DependentScopeDeclRefExpr::Create requires a valid NestedNameSpecifierLoc
+ if (!SS.isValid())
+ return CreateRecoveryExpr(
+ SS.getBeginLoc(),
+ TemplateArgs ? TemplateArgs->getRAngleLoc() : NameInfo.getEndLoc(), {});
return DependentScopeDeclRefExpr::Create(
- Context, QualifierLoc, TemplateKWLoc, NameInfo, TemplateArgs);
+ Context, SS.getWithLocInContext(Context), TemplateKWLoc, NameInfo,
+ TemplateArgs);
}
@@ -1091,7 +1071,8 @@ NamedDecl *Sema::ActOnTypeParameter(Scope *S, bool Typename,
return Param;
}
- Param->setDefaultArgument(DefaultTInfo);
+ Param->setDefaultArgument(
+ Context, TemplateArgumentLoc(DefaultTInfo->getType(), DefaultTInfo));
}
return Param;
@@ -1859,7 +1840,8 @@ DeclResult Sema::CheckClassTemplate(
TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody) {
assert(TemplateParams && TemplateParams->size() > 0 &&
"No template parameters");
- assert(TUK != TUK_Reference && "Can only declare or define class templates");
+ assert(TUK != TagUseKind::Reference &&
+ "Can only declare or define class templates");
bool Invalid = false;
// Check that we can declare a template here.
@@ -1881,8 +1863,9 @@ DeclResult Sema::CheckClassTemplate(
// C++11 [basic.lookup.elab]p2).
DeclContext *SemanticContext;
LookupResult Previous(*this, Name, NameLoc,
- (SS.isEmpty() && TUK == TUK_Friend)
- ? LookupTagName : LookupOrdinaryName,
+ (SS.isEmpty() && TUK == TagUseKind::Friend)
+ ? LookupTagName
+ : LookupOrdinaryName,
forRedeclarationInCurContext());
if (SS.isNotEmpty() && !SS.isInvalid()) {
SemanticContext = computeDeclContext(SS, true);
@@ -1890,11 +1873,11 @@ DeclResult Sema::CheckClassTemplate(
// FIXME: Horrible, horrible hack! We can't currently represent this
// in the AST, and historically we have just ignored such friend
// class templates, so don't complain here.
- Diag(NameLoc, TUK == TUK_Friend
+ Diag(NameLoc, TUK == TagUseKind::Friend
? diag::warn_template_qualified_friend_ignored
: diag::err_template_qualified_declarator_no_match)
<< SS.getScopeRep() << SS.getRange();
- return TUK != TUK_Friend;
+ return TUK != TagUseKind::Friend;
}
if (RequireCompleteDeclContext(SS, SemanticContext))
@@ -1909,7 +1892,7 @@ DeclResult Sema::CheckClassTemplate(
Invalid = true;
}
- if (TUK != TUK_Friend && TUK != TUK_Reference)
+ if (TUK != TagUseKind::Friend && TUK != TagUseKind::Reference)
diagnoseQualifiedDeclaration(SS, SemanticContext, Name, NameLoc,
/*TemplateId-*/ nullptr,
/*IsMemberSpecialization*/ false);
@@ -1922,7 +1905,7 @@ DeclResult Sema::CheckClassTemplate(
// If T is the name of a class, then each of the following shall have a
// name different from T:
// -- every member template of class T
- if (TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Friend &&
DiagnoseClassNameShadow(SemanticContext,
DeclarationNameInfo(Name, NameLoc)))
return true;
@@ -1964,7 +1947,7 @@ DeclResult Sema::CheckClassTemplate(
}
}
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
// C++ [namespace.memdef]p3:
// [...] When looking for a prior declaration of a class or a function
// declared as a friend, and when the name of the friend class or
@@ -2001,9 +1984,8 @@ DeclResult Sema::CheckClassTemplate(
PrevDecl = (*Previous.begin())->getUnderlyingDecl();
}
}
- } else if (PrevDecl &&
- !isDeclInScope(Previous.getRepresentativeDecl(), SemanticContext,
- S, SS.isValid()))
+ } else if (PrevDecl && !isDeclInScope(Previous.getRepresentativeDecl(),
+ SemanticContext, S, SS.isValid()))
PrevDecl = PrevClassTemplate = nullptr;
if (auto *Shadow = dyn_cast_or_null<UsingShadowDecl>(
@@ -2025,7 +2007,7 @@ DeclResult Sema::CheckClassTemplate(
// Ensure that the template parameter lists are compatible. Skip this check
// for a friend in a dependent context: the template parameter list itself
// could be dependent.
- if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
+ if (!(TUK == TagUseKind::Friend && CurContext->isDependentContext()) &&
!TemplateParameterListsAreEqual(
TemplateCompareNewDeclInfo(SemanticContext ? SemanticContext
: CurContext,
@@ -2041,8 +2023,8 @@ DeclResult Sema::CheckClassTemplate(
// the class-key shall agree in kind with the original class
// template declaration (7.1.5.3).
RecordDecl *PrevRecordDecl = PrevClassTemplate->getTemplatedDecl();
- if (!isAcceptableTagRedeclaration(PrevRecordDecl, Kind,
- TUK == TUK_Definition, KWLoc, Name)) {
+ if (!isAcceptableTagRedeclaration(
+ PrevRecordDecl, Kind, TUK == TagUseKind::Definition, KWLoc, Name)) {
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< Name
<< FixItHint::CreateReplacement(KWLoc, PrevRecordDecl->getKindName());
@@ -2051,7 +2033,7 @@ DeclResult Sema::CheckClassTemplate(
}
// Check for redefinition of this class template.
- if (TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
if (TagDecl *Def = PrevRecordDecl->getDefinition()) {
// If we have a prior definition that is not visible, treat this as
// simply making that previous definition visible.
@@ -2088,7 +2070,7 @@ DeclResult Sema::CheckClassTemplate(
// merging in the template parameter list from the previous class
// template declaration. Skip this check for a friend in a dependent
// context, because the template parameter list might be dependent.
- if (!(TUK == TUK_Friend && CurContext->isDependentContext()) &&
+ if (!(TUK == TagUseKind::Friend && CurContext->isDependentContext()) &&
CheckTemplateParameterList(
TemplateParams,
PrevClassTemplate ? GetTemplateParameterList(PrevClassTemplate)
@@ -2096,8 +2078,8 @@ DeclResult Sema::CheckClassTemplate(
(SS.isSet() && SemanticContext && SemanticContext->isRecord() &&
SemanticContext->isDependentContext())
? TPC_ClassTemplateMember
- : TUK == TUK_Friend ? TPC_FriendClassTemplate
- : TPC_ClassTemplate,
+ : TUK == TagUseKind::Friend ? TPC_FriendClassTemplate
+ : TPC_ClassTemplate,
SkipBody))
Invalid = true;
@@ -2105,9 +2087,10 @@ DeclResult Sema::CheckClassTemplate(
// If the name of the template was qualified, we must be defining the
// template out-of-line.
if (!SS.isInvalid() && !Invalid && !PrevClassTemplate) {
- Diag(NameLoc, TUK == TUK_Friend ? diag::err_friend_decl_does_not_match
- : diag::err_member_decl_does_not_match)
- << Name << SemanticContext << /*IsDefinition*/true << SS.getRange();
+ Diag(NameLoc, TUK == TagUseKind::Friend
+ ? diag::err_friend_decl_does_not_match
+ : diag::err_member_decl_does_not_match)
+ << Name << SemanticContext << /*IsDefinition*/ true << SS.getRange();
Invalid = true;
}
}
@@ -2117,8 +2100,8 @@ DeclResult Sema::CheckClassTemplate(
// recent declaration tricking the template instantiator to make substitutions
// there.
// FIXME: Figure out how to combine with shouldLinkDependentDeclWithPrevious
- bool ShouldAddRedecl
- = !(TUK == TUK_Friend && CurContext->isDependentContext());
+ bool ShouldAddRedecl =
+ !(TUK == TagUseKind::Friend && CurContext->isDependentContext());
CXXRecordDecl *NewClass =
CXXRecordDecl::Create(Context, Kind, SemanticContext, KWLoc, NameLoc, Name,
@@ -2133,7 +2116,7 @@ DeclResult Sema::CheckClassTemplate(
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(NewClass);
AddMsStructLayoutForRecord(NewClass);
}
@@ -2164,14 +2147,15 @@ DeclResult Sema::CheckClassTemplate(
PrevClassTemplate->setMemberSpecialization();
// Set the access specifier.
- if (!Invalid && TUK != TUK_Friend && NewTemplate->getDeclContext()->isRecord())
+ if (!Invalid && TUK != TagUseKind::Friend &&
+ NewTemplate->getDeclContext()->isRecord())
SetMemberAccessSpecifier(NewTemplate, PrevClassTemplate, AS);
// Set the lexical context of these templates
NewClass->setLexicalDeclContext(CurContext);
NewTemplate->setLexicalDeclContext(CurContext);
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip))
NewClass->startDefinition();
ProcessDeclAttributeList(S, NewClass, Attr);
@@ -2184,7 +2168,7 @@ DeclResult Sema::CheckClassTemplate(
inferGslOwnerPointerAttribute(NewClass);
inferNullableClassAttribute(NewClass);
- if (TUK != TUK_Friend) {
+ if (TUK != TagUseKind::Friend) {
// Per C++ [basic.scope.temp]p2, skip the template parameter scopes.
Scope *Outer = S;
while ((Outer->getFlags() & Scope::TemplateParamScope) != 0)
@@ -2338,11 +2322,11 @@ transformTemplateTypeParam(Sema &SemaRef, DeclContext *DC,
SemaRef.SubstTypeConstraint(NewTTP, TC, Args,
/*EvaluateConstraint=*/true);
if (TTP->hasDefaultArgument()) {
- TypeSourceInfo *InstantiatedDefaultArg =
- SemaRef.SubstType(TTP->getDefaultArgumentInfo(), Args,
- TTP->getDefaultArgumentLoc(), TTP->getDeclName());
- if (InstantiatedDefaultArg)
- NewTTP->setDefaultArgument(InstantiatedDefaultArg);
+ TemplateArgumentLoc InstantiatedDefaultArg;
+ if (!SemaRef.SubstTemplateArgument(
+ TTP->getDefaultArgument(), Args, InstantiatedDefaultArg,
+ TTP->getDefaultArgumentLoc(), TTP->getDeclName()))
+ NewTTP->setDefaultArgument(SemaRef.Context, InstantiatedDefaultArg);
}
SemaRef.CurrentInstantiationScope->InstantiatedLocal(TTP, NewTTP);
return NewTTP;
@@ -3595,10 +3579,9 @@ bool Sema::CheckTemplateParameterList(TemplateParameterList *NewParams,
= dyn_cast<TemplateTypeParmDecl>(*NewParam)) {
// Check the presence of a default argument here.
if (NewTypeParm->hasDefaultArgument() &&
- DiagnoseDefaultTemplateArgument(*this, TPC,
- NewTypeParm->getLocation(),
- NewTypeParm->getDefaultArgumentInfo()->getTypeLoc()
- .getSourceRange()))
+ DiagnoseDefaultTemplateArgument(
+ *this, TPC, NewTypeParm->getLocation(),
+ NewTypeParm->getDefaultArgument().getSourceRange()))
NewTypeParm->removeDefaultArgument();
// Merge default arguments for template type parameters.
@@ -5035,7 +5018,7 @@ TypeResult Sema::ActOnTagTemplateIdType(TagUseKind TUK,
IdentifierInfo *Id = D->getIdentifier();
assert(Id && "templated class must have an identifier");
- if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TUK_Definition,
+ if (!isAcceptableTagRedeclaration(D, TagKind, TUK == TagUseKind::Definition,
TagLoc, Id)) {
Diag(TagLoc, diag::err_use_with_wrong_tag)
<< Result
@@ -5747,50 +5730,36 @@ ExprResult Sema::BuildTemplateIdExpr(const CXXScopeSpec &SS,
}
// We actually only call this from template instantiation.
-ExprResult
-Sema::BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
- SourceLocation TemplateKWLoc,
- const DeclarationNameInfo &NameInfo,
- const TemplateArgumentListInfo *TemplateArgs) {
-
+ExprResult Sema::BuildQualifiedTemplateIdExpr(
+ CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
+ const DeclarationNameInfo &NameInfo,
+ const TemplateArgumentListInfo *TemplateArgs, bool IsAddressOfOperand) {
assert(TemplateArgs || TemplateKWLoc.isValid());
- DeclContext *DC;
- if (!(DC = computeDeclContext(SS, false)) ||
- DC->isDependentContext() ||
- RequireCompleteDeclContext(SS, DC))
- return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
LookupResult R(*this, NameInfo, LookupOrdinaryName);
- if (LookupTemplateName(R, (Scope *)nullptr, SS, QualType(),
- /*Entering*/ false, TemplateKWLoc))
+ if (LookupTemplateName(R, /*S=*/nullptr, SS, /*ObjectType=*/QualType(),
+ /*EnteringContext=*/false, TemplateKWLoc))
return ExprError();
if (R.isAmbiguous())
return ExprError();
+ if (R.wasNotFoundInCurrentInstantiation() || SS.isInvalid())
+ return BuildDependentDeclRefExpr(SS, TemplateKWLoc, NameInfo, TemplateArgs);
+
if (R.empty()) {
+ DeclContext *DC = computeDeclContext(SS);
Diag(NameInfo.getLoc(), diag::err_no_member)
<< NameInfo.getName() << DC << SS.getRange();
return ExprError();
}
- auto DiagnoseTypeTemplateDecl = [&](TemplateDecl *Temp,
- bool isTypeAliasTemplateDecl) {
- Diag(NameInfo.getLoc(), diag::err_template_kw_refers_to_type_template)
- << SS.getScopeRep() << NameInfo.getName().getAsString() << SS.getRange()
- << isTypeAliasTemplateDecl;
- Diag(Temp->getLocation(), diag::note_referenced_type_template)
- << isTypeAliasTemplateDecl;
- return CreateRecoveryExpr(NameInfo.getBeginLoc(), NameInfo.getEndLoc(), {});
- };
-
- if (ClassTemplateDecl *Temp = R.getAsSingle<ClassTemplateDecl>())
- return DiagnoseTypeTemplateDecl(Temp, false);
-
- if (TypeAliasTemplateDecl *Temp = R.getAsSingle<TypeAliasTemplateDecl>())
- return DiagnoseTypeTemplateDecl(Temp, true);
+ // If necessary, build an implicit class member access.
+ if (isPotentialImplicitMemberAccess(SS, R, IsAddressOfOperand))
+ return BuildPossibleImplicitMemberExpr(SS, TemplateKWLoc, R, TemplateArgs,
+ /*S=*/nullptr);
- return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL*/ false, TemplateArgs);
+ return BuildTemplateIdExpr(SS, TemplateKWLoc, R, /*ADL=*/false, TemplateArgs);
}
/// Form a template name from a name that is syntactically required to name a
@@ -5982,8 +5951,7 @@ bool Sema::CheckTemplateTypeArgument(
LookupParsedName(Result, CurScope, &SS, /*ObjectType=*/QualType());
if (Result.getAsSingle<TypeDecl>() ||
- Result.getResultKind() ==
- LookupResult::NotFoundInCurrentInstantiation) {
+ Result.wasNotFoundInCurrentInstantiation()) {
assert(SS.getScopeRep() && "dependent scope expr must has a scope!");
// Suggest that the user add 'typename' before the NNS.
SourceLocation Loc = AL.getSourceRange().getBegin();
@@ -6075,22 +6043,26 @@ bool Sema::CheckTemplateTypeArgument(
///
/// \param Converted the list of template arguments provided for template
/// parameters that precede \p Param in the template parameter list.
-/// \returns the substituted template argument, or NULL if an error occurred.
-static TypeSourceInfo *SubstDefaultTemplateArgument(
+///
+/// \param Output the resulting substituted template argument.
+///
+/// \returns true if an error occurred.
+static bool SubstDefaultTemplateArgument(
Sema &SemaRef, TemplateDecl *Template, SourceLocation TemplateLoc,
SourceLocation RAngleLoc, TemplateTypeParmDecl *Param,
ArrayRef<TemplateArgument> SugaredConverted,
- ArrayRef<TemplateArgument> CanonicalConverted) {
- TypeSourceInfo *ArgType = Param->getDefaultArgumentInfo();
+ ArrayRef<TemplateArgument> CanonicalConverted,
+ TemplateArgumentLoc &Output) {
+ Output = Param->getDefaultArgument();
// If the argument type is dependent, instantiate it now based
// on the previously-computed template arguments.
- if (ArgType->getType()->isInstantiationDependentType()) {
+ if (Output.getArgument().isInstantiationDependent()) {
Sema::InstantiatingTemplate Inst(SemaRef, TemplateLoc, Param, Template,
SugaredConverted,
SourceRange(TemplateLoc, RAngleLoc));
if (Inst.isInvalid())
- return nullptr;
+ return true;
// Only substitute for the innermost template argument list.
MultiLevelTemplateArgumentList TemplateArgLists(Template, SugaredConverted,
@@ -6103,12 +6075,14 @@ static TypeSourceInfo *SubstDefaultTemplateArgument(
ForLambdaCallOperator = Rec->isLambda();
Sema::ContextRAII SavedContext(SemaRef, Template->getDeclContext(),
!ForLambdaCallOperator);
- ArgType =
- SemaRef.SubstType(ArgType, TemplateArgLists,
- Param->getDefaultArgumentLoc(), Param->getDeclName());
+
+ if (SemaRef.SubstTemplateArgument(Output, TemplateArgLists, Output,
+ Param->getDefaultArgumentLoc(),
+ Param->getDeclName()))
+ return true;
}
- return ArgType;
+ return false;
}
/// Substitute template arguments into the default template argument for
@@ -6231,13 +6205,12 @@ TemplateArgumentLoc Sema::SubstDefaultTemplateArgumentIfAvailable(
return TemplateArgumentLoc();
HasDefaultArg = true;
- TypeSourceInfo *DI = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, TypeParm, SugaredConverted,
- CanonicalConverted);
- if (DI)
- return TemplateArgumentLoc(TemplateArgument(DI->getType()), DI);
-
- return TemplateArgumentLoc();
+ TemplateArgumentLoc Output;
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
+ TypeParm, SugaredConverted,
+ CanonicalConverted, Output))
+ return TemplateArgumentLoc();
+ return Output;
}
if (NonTypeTemplateParmDecl *NonTypeParm
@@ -6820,14 +6793,10 @@ bool Sema::CheckTemplateArgumentList(
return diagnoseMissingArgument(*this, TemplateLoc, Template, TTP,
NewArgs);
- TypeSourceInfo *ArgType = SubstDefaultTemplateArgument(
- *this, Template, TemplateLoc, RAngleLoc, TTP, SugaredConverted,
- CanonicalConverted);
- if (!ArgType)
+ if (SubstDefaultTemplateArgument(*this, Template, TemplateLoc, RAngleLoc,
+ TTP, SugaredConverted,
+ CanonicalConverted, Arg))
return true;
-
- Arg = TemplateArgumentLoc(TemplateArgument(ArgType->getType()),
- ArgType);
} else if (NonTypeTemplateParmDecl *NTTP
= dyn_cast<NonTypeTemplateParmDecl>(*Param)) {
if (!hasReachableDefaultArgument(NTTP))
@@ -9486,7 +9455,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
SourceLocation ModulePrivateLoc, CXXScopeSpec &SS,
TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody) {
- assert(TUK != TUK_Reference && "References are not specializations");
+ assert(TUK != TagUseKind::Reference && "References are not specializations");
SourceLocation TemplateNameLoc = TemplateId.TemplateNameLoc;
SourceLocation LAngleLoc = TemplateId.LAngleLoc;
@@ -9508,7 +9477,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
bool isPartialSpecialization = false;
if (SS.isSet()) {
- if (TUK != TUK_Reference && TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Reference && TUK != TagUseKind::Friend &&
diagnoseQualifiedDeclaration(SS, ClassTemplate->getDeclContext(),
ClassTemplate->getDeclName(),
TemplateNameLoc, &TemplateId,
@@ -9523,9 +9492,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
bool Invalid = false;
TemplateParameterList *TemplateParams =
MatchTemplateParametersToScopeSpecifier(
- KWLoc, TemplateNameLoc, SS, &TemplateId,
- TemplateParameterLists, TUK == TUK_Friend, isMemberSpecialization,
- Invalid);
+ KWLoc, TemplateNameLoc, SS, &TemplateId, TemplateParameterLists,
+ TUK == TagUseKind::Friend, isMemberSpecialization, Invalid);
if (Invalid)
return true;
@@ -9536,7 +9504,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
if (TemplateParams && TemplateParams->size() > 0) {
isPartialSpecialization = true;
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
Diag(KWLoc, diag::err_partial_specialization_friend)
<< SourceRange(LAngleLoc, RAngleLoc);
return true;
@@ -9572,14 +9540,15 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
}
} else if (TemplateParams) {
- if (TUK == TUK_Friend)
+ if (TUK == TagUseKind::Friend)
Diag(KWLoc, diag::err_template_spec_friend)
<< FixItHint::CreateRemoval(
SourceRange(TemplateParams->getTemplateLoc(),
TemplateParams->getRAngleLoc()))
<< SourceRange(LAngleLoc, RAngleLoc);
} else {
- assert(TUK == TUK_Friend && "should have a 'template<>' for this decl");
+ assert(TUK == TagUseKind::Friend &&
+ "should have a 'template<>' for this decl");
}
// Check that the specialization uses the same tag kind as the
@@ -9587,8 +9556,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
assert(Kind != TagTypeKind::Enum &&
"Invalid enum tag in class template spec!");
- if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(),
- Kind, TUK == TUK_Definition, KWLoc,
+ if (!isAcceptableTagRedeclaration(ClassTemplate->getTemplatedDecl(), Kind,
+ TUK == TagUseKind::Definition, KWLoc,
ClassTemplate->getIdentifier())) {
Diag(KWLoc, diag::err_use_with_wrong_tag)
<< ClassTemplate
@@ -9652,7 +9621,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// Check whether we can declare a class template specialization in
// the current scope.
- if (TUK != TUK_Friend &&
+ if (TUK != TagUseKind::Friend &&
CheckTemplateSpecializationScope(*this, ClassTemplate, PrevDecl,
TemplateNameLoc,
isPartialSpecialization))
@@ -9679,8 +9648,8 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// This rule has since been removed, because it's redundant given DR1495,
// but we keep it because it produces better diagnostics and recovery.
Diag(TemplateNameLoc, diag::err_partial_spec_args_match_primary_template)
- << /*class template*/0 << (TUK == TUK_Definition)
- << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
+ << /*class template*/ 0 << (TUK == TagUseKind::Definition)
+ << FixItHint::CreateRemoval(SourceRange(LAngleLoc, RAngleLoc));
return CheckClassTemplate(S, TagSpec, TUK, KWLoc, SS,
ClassTemplate->getIdentifier(),
TemplateNameLoc,
@@ -9772,11 +9741,11 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
}
// If this is not a friend, note that this is an explicit specialization.
- if (TUK != TUK_Friend)
+ if (TUK != TagUseKind::Friend)
Specialization->setSpecializationKind(TSK_ExplicitSpecialization);
// Check that this isn't a redefinition of this specialization.
- if (TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Definition) {
RecordDecl *Def = Specialization->getDefinition();
NamedDecl *Hidden = nullptr;
if (Def && SkipBody && !hasVisibleDefinition(Def, &Hidden)) {
@@ -9797,7 +9766,7 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
// Add alignment attributes if necessary; these attributes are checked when
// the ASTContext lays out the structure.
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip)) {
AddAlignmentAttributesForRecord(Specialization);
AddMsStructLayoutForRecord(Specialization);
}
@@ -9818,10 +9787,10 @@ DeclResult Sema::ActOnClassTemplateSpecialization(
Specialization->setLexicalDeclContext(CurContext);
// We may be starting the definition of this specialization.
- if (TUK == TUK_Definition && (!SkipBody || !SkipBody->ShouldSkip))
+ if (TUK == TagUseKind::Definition && (!SkipBody || !SkipBody->ShouldSkip))
Specialization->startDefinition();
- if (TUK == TUK_Friend) {
+ if (TUK == TagUseKind::Friend) {
// Build the fully-sugared type for this class template
// specialization as the user wrote in the specialization
// itself. This means that we'll pretty-print the type retrieved
@@ -10290,15 +10259,20 @@ bool Sema::CheckFunctionTemplateSpecialization(
Ovl->getDeclContext()->getRedeclContext()))
continue;
+ QualType FT = FD->getType();
+ // C++11 [dcl.constexpr]p8:
+ // A constexpr specifier for a non-static member function that is not
+ // a constructor declares that member function to be const.
+ //
// When matching a constexpr member function template specialization
// against the primary template, we don't yet know whether the
// specialization has an implicit 'const' (because we don't know whether
// it will be a static member function until we know which template it
- // specializes), so adjust it now assuming it specializes this template.
- QualType FT = FD->getType();
- if (FD->isConstexpr()) {
- CXXMethodDecl *OldMD =
- dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
+ // specializes). This rule was removed in C++14.
+ if (auto *NewMD = dyn_cast<CXXMethodDecl>(FD);
+ !getLangOpts().CPlusPlus14 && NewMD && NewMD->isConstexpr() &&
+ !isa<CXXConstructorDecl, CXXDestructorDecl>(NewMD)) {
+ auto *OldMD = dyn_cast<CXXMethodDecl>(FunTmpl->getTemplatedDecl());
if (OldMD && OldMD->isConst()) {
const FunctionProtoType *FPT = FT->castAs<FunctionProtoType>();
FunctionProtoType::ExtProtoInfo EPI = FPT->getExtProtoInfo();
@@ -11124,8 +11098,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
Def->setTemplateSpecializationKind(TSK);
if (!getDLLAttr(Def) && getDLLAttr(Specialization) &&
- (Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
- !Context.getTargetInfo().getTriple().isPS())) {
+ Context.getTargetInfo().shouldDLLImportComdatSymbols()) {
// An explicit instantiation definition can add a dll attribute to a
// template with a previous instantiation declaration. MinGW doesn't
// allow this.
@@ -11142,8 +11115,7 @@ DeclResult Sema::ActOnExplicitInstantiation(
bool NewlyDLLExported =
!PreviouslyDLLExported && Specialization->hasAttr<DLLExportAttr>();
if (Old_TSK == TSK_ImplicitInstantiation && NewlyDLLExported &&
- (Context.getTargetInfo().shouldDLLImportComdatSymbols() &&
- !Context.getTargetInfo().getTriple().isPS())) {
+ Context.getTargetInfo().shouldDLLImportComdatSymbols()) {
// An explicit instantiation definition can add a dll attribute to a
// template with a previous implicit instantiation. MinGW doesn't allow
// this. We limit clang to only adding dllexport, to avoid potentially
@@ -11192,11 +11164,13 @@ Sema::ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
bool Owned = false;
bool IsDependent = false;
- Decl *TagD = ActOnTag(S, TagSpec, Sema::TUK_Reference, KWLoc, SS, Name,
- NameLoc, Attr, AS_none, /*ModulePrivateLoc=*/SourceLocation(),
+ Decl *TagD =
+ ActOnTag(S, TagSpec, TagUseKind::Reference, KWLoc, SS, Name, NameLoc,
+ Attr, AS_none, /*ModulePrivateLoc=*/SourceLocation(),
MultiTemplateParamsArg(), Owned, IsDependent, SourceLocation(),
false, TypeResult(), /*IsTypeSpecifier*/ false,
- /*IsTemplateParamOrArg*/ false, /*OOK=*/OOK_Outside).get();
+ /*IsTemplateParamOrArg*/ false, /*OOK=*/OOK_Outside)
+ .get();
assert(!IsDependent && "explicit instantiation of dependent name not yet handled");
if (!TagD)
@@ -11727,9 +11701,9 @@ TypeResult Sema::ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
TagTypeKind Kind = TypeWithKeyword::getTagTypeKindForTypeSpec(TagSpec);
- if (TUK == TUK_Declaration || TUK == TUK_Definition) {
+ if (TUK == TagUseKind::Declaration || TUK == TagUseKind::Definition) {
Diag(NameLoc, diag::err_dependent_tag_decl)
- << (TUK == TUK_Definition) << llvm::to_underlying(Kind)
+ << (TUK == TagUseKind::Definition) << llvm::to_underlying(Kind)
<< SS.getRange();
return true;
}
diff --git a/clang/lib/Sema/SemaTemplateDeduction.cpp b/clang/lib/Sema/SemaTemplateDeduction.cpp
index 41fd210f29d0..0c348633576d 100644
--- a/clang/lib/Sema/SemaTemplateDeduction.cpp
+++ b/clang/lib/Sema/SemaTemplateDeduction.cpp
@@ -519,18 +519,14 @@ static NamedDecl *getTemplateParameterWithDefault(Sema &S, NamedDecl *A,
switch (A->getKind()) {
case Decl::TemplateTypeParm: {
auto *T = cast<TemplateTypeParmDecl>(A);
- // FIXME: A TemplateTypeParmDecl's DefaultArgument can't hold a full
- // TemplateArgument, so there is currently no way to specify a pack as a
- // default argument for these.
- if (T->isParameterPack())
- return A;
auto *R = TemplateTypeParmDecl::Create(
S.Context, A->getDeclContext(), SourceLocation(), SourceLocation(),
T->getDepth(), T->getIndex(), T->getIdentifier(),
- T->wasDeclaredWithTypename(), /*ParameterPack=*/false,
+ T->wasDeclaredWithTypename(), T->isParameterPack(),
T->hasTypeConstraint());
R->setDefaultArgument(
- S.Context.getTrivialTypeSourceInfo(Default.getAsType()));
+ S.Context,
+ S.getTrivialTemplateArgumentLoc(Default, QualType(), SourceLocation()));
if (R->hasTypeConstraint()) {
auto *C = R->getTypeConstraint();
R->setTypeConstraint(C->getConceptReference(),
diff --git a/clang/lib/Sema/SemaTemplateInstantiate.cpp b/clang/lib/Sema/SemaTemplateInstantiate.cpp
index 07626058c797..abb8a260faab 100644
--- a/clang/lib/Sema/SemaTemplateInstantiate.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiate.cpp
@@ -1619,11 +1619,6 @@ namespace {
case TemplateArgument::Pack:
// Literally rewrite the template argument pack, instead of unpacking
// it.
- assert(
- SemaRef.CodeSynthesisContexts.back().Kind ==
- Sema::CodeSynthesisContext::BuildingDeductionGuides &&
- "Transforming a template argument pack is only allowed in building "
- "deduction guide");
for (auto &pack : Arg.getPackAsArray()) {
TemplateArgumentLoc Input = SemaRef.getTrivialTemplateArgumentLoc(
pack, QualType(), SourceLocation{});
@@ -4375,9 +4370,9 @@ Sema::SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs) {
bool Sema::SubstTemplateArgument(
const TemplateArgumentLoc &Input,
const MultiLevelTemplateArgumentList &TemplateArgs,
- TemplateArgumentLoc &Output) {
- TemplateInstantiator Instantiator(*this, TemplateArgs, SourceLocation(),
- DeclarationName());
+ TemplateArgumentLoc &Output, SourceLocation Loc,
+ const DeclarationName &Entity) {
+ TemplateInstantiator Instantiator(*this, TemplateArgs, Loc, Entity);
return Instantiator.TransformTemplateArgument(Input, Output);
}
diff --git a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
index 381d79b2fcd4..38a300332068 100644
--- a/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
+++ b/clang/lib/Sema/SemaTemplateInstantiateDecl.cpp
@@ -2956,11 +2956,10 @@ Decl *TemplateDeclInstantiator::VisitTemplateTypeParmDecl(
}
}
if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited()) {
- TypeSourceInfo *InstantiatedDefaultArg =
- SemaRef.SubstType(D->getDefaultArgumentInfo(), TemplateArgs,
- D->getDefaultArgumentLoc(), D->getDeclName());
- if (InstantiatedDefaultArg)
- Inst->setDefaultArgument(InstantiatedDefaultArg);
+ TemplateArgumentLoc Output;
+ if (!SemaRef.SubstTemplateArgument(D->getDefaultArgument(), TemplateArgs,
+ Output))
+ Inst->setDefaultArgument(SemaRef.getASTContext(), Output);
}
// Introduce this template parameter's instantiation into the instantiation
diff --git a/clang/lib/Sema/SemaTemplateVariadic.cpp b/clang/lib/Sema/SemaTemplateVariadic.cpp
index a4b681ae4f00..0b2060466506 100644
--- a/clang/lib/Sema/SemaTemplateVariadic.cpp
+++ b/clang/lib/Sema/SemaTemplateVariadic.cpp
@@ -1128,7 +1128,7 @@ Sema::BuildPackIndexingExpr(Expr *PackExpression, SourceLocation EllipsisLoc,
return PackIndexingExpr::Create(getASTContext(), EllipsisLoc, RSquareLoc,
PackExpression, IndexExpr, Index,
- ExpandedExprs);
+ ExpandedExprs, EmptyPack);
}
TemplateArgumentLoc Sema::getTemplateArgumentPackExpansionPattern(
diff --git a/clang/lib/Sema/TreeTransform.h b/clang/lib/Sema/TreeTransform.h
index b10e5ba65eb1..6b53c2490cc4 100644
--- a/clang/lib/Sema/TreeTransform.h
+++ b/clang/lib/Sema/TreeTransform.h
@@ -42,6 +42,7 @@
#include "clang/Sema/SemaObjC.h"
#include "clang/Sema/SemaOpenACC.h"
#include "clang/Sema/SemaOpenMP.h"
+#include "clang/Sema/SemaPseudoObject.h"
#include "clang/Sema/SemaSYCL.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/Support/ErrorHandling.h"
@@ -3478,11 +3479,11 @@ public:
SS.Adopt(QualifierLoc);
if (TemplateArgs || TemplateKWLoc.isValid())
- return getSema().BuildQualifiedTemplateIdExpr(SS, TemplateKWLoc, NameInfo,
- TemplateArgs);
+ return getSema().BuildQualifiedTemplateIdExpr(
+ SS, TemplateKWLoc, NameInfo, TemplateArgs, IsAddressOfOperand);
return getSema().BuildQualifiedDeclarationNameExpr(
- SS, NameInfo, IsAddressOfOperand, /*S*/nullptr, RecoveryTSI);
+ SS, NameInfo, IsAddressOfOperand, RecoveryTSI);
}
/// Build a new template-id expression.
@@ -4818,14 +4819,6 @@ bool TreeTransform<Derived>::TransformTemplateArguments(
TemplateArgumentLoc In = *First;
if (In.getArgument().getKind() == TemplateArgument::Pack) {
- // When building the deduction guides, we rewrite the argument packs
- // instead of unpacking.
- if (getSema().CodeSynthesisContexts.back().Kind ==
- Sema::CodeSynthesisContext::BuildingDeductionGuides) {
- if (getDerived().TransformTemplateArgument(In, Out, Uneval))
- return true;
- continue;
- }
// Unpack argument packs, which we translate them into separate
// arguments.
// FIXME: We could do much better if we could guarantee that the
@@ -11133,7 +11126,8 @@ class OpenACCClauseTransform final
if (!Res.isUsable())
continue;
- Res = Self.getSema().OpenACC().ActOnVar(Res.get());
+ Res = Self.getSema().OpenACC().ActOnVar(ParsedClause.getClauseKind(),
+ Res.get());
if (Res.isUsable())
InstantiatedVarList.push_back(Res.get());
@@ -11493,6 +11487,24 @@ void OpenACCClauseTransform<Derived>::VisitDeviceTypeClause(
ParsedClause.getBeginLoc(), ParsedClause.getLParenLoc(),
C.getArchitectures(), ParsedClause.getEndLoc());
}
+
+template <typename Derived>
+void OpenACCClauseTransform<Derived>::VisitReductionClause(
+ const OpenACCReductionClause &C) {
+ SmallVector<Expr *> TransformedVars = VisitVarList(C.getVarList());
+ SmallVector<Expr *> ValidVars;
+
+ for (Expr *Var : TransformedVars) {
+ ExprResult Res = Self.getSema().OpenACC().CheckReductionVar(Var);
+ if (Res.isUsable())
+ ValidVars.push_back(Res.get());
+ }
+
+ NewClause = OpenACCReductionClause::Create(
+ Self.getSema().getASTContext(), ParsedClause.getBeginLoc(),
+ ParsedClause.getLParenLoc(), C.getReductionOp(), ValidVars,
+ ParsedClause.getEndLoc());
+}
} // namespace
template <typename Derived>
OpenACCClause *TreeTransform<Derived>::TransformOpenACCClause(
@@ -11890,7 +11902,7 @@ TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
// better solution (rebuilding the semantic expressions and
// rebinding OVEs as necessary) doesn't work; we'd need
// TreeTransform to not strip away implicit conversions.
- Expr *newSyntacticForm = SemaRef.recreateSyntacticForm(E);
+ Expr *newSyntacticForm = SemaRef.PseudoObject().recreateSyntacticForm(E);
ExprResult result = getDerived().TransformExpr(newSyntacticForm);
if (result.isInvalid()) return ExprError();
@@ -11898,7 +11910,7 @@ TreeTransform<Derived>::TransformPseudoObjectExpr(PseudoObjectExpr *E) {
// expression must have been an lvalue-to-rvalue conversion which we
// should reapply.
if (result.get()->hasPlaceholderType(BuiltinType::PseudoObject))
- result = SemaRef.checkPseudoObjectRValue(result.get());
+ result = SemaRef.PseudoObject().checkRValue(result.get());
return result;
}
@@ -14975,7 +14987,7 @@ TreeTransform<Derived>::TransformPackIndexingExpr(PackIndexingExpr *E) {
return ExprError();
SmallVector<Expr *, 5> ExpandedExprs;
- if (E->getExpressions().empty()) {
+ if (!E->expandsToEmptyPack() && E->getExpressions().empty()) {
Expr *Pattern = E->getPackIdExpression();
SmallVector<UnexpandedParameterPack, 2> Unexpanded;
getSema().collectUnexpandedParameterPacks(E->getPackIdExpression(),
@@ -15029,9 +15041,7 @@ TreeTransform<Derived>::TransformPackIndexingExpr(PackIndexingExpr *E) {
return true;
ExpandedExprs.push_back(Out.get());
}
- }
-
- else {
+ } else if (!E->expandsToEmptyPack()) {
if (getDerived().TransformExprs(E->getExpressions().data(),
E->getExpressions().size(), false,
ExpandedExprs))
@@ -16196,8 +16206,8 @@ ExprResult TreeTransform<Derived>::RebuildCXXOperatorCallExpr(
if (First->getObjectKind() == OK_ObjCProperty) {
BinaryOperatorKind Opc = BinaryOperator::getOverloadedOpcode(Op);
if (BinaryOperator::isAssignmentOp(Opc))
- return SemaRef.checkPseudoObjectAssignment(/*Scope=*/nullptr, OpLoc, Opc,
- First, Second);
+ return SemaRef.PseudoObject().checkAssignment(/*Scope=*/nullptr, OpLoc,
+ Opc, First, Second);
ExprResult Result = SemaRef.CheckPlaceholderExpr(First);
if (Result.isInvalid())
return ExprError();
diff --git a/clang/lib/Serialization/ASTReader.cpp b/clang/lib/Serialization/ASTReader.cpp
index f50f9569c0a5..d7fc6697eaf7 100644
--- a/clang/lib/Serialization/ASTReader.cpp
+++ b/clang/lib/Serialization/ASTReader.cpp
@@ -11921,6 +11921,13 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() {
return OpenACCDeviceTypeClause::Create(getContext(), ClauseKind, BeginLoc,
LParenLoc, Archs, EndLoc);
}
+ case OpenACCClauseKind::Reduction: {
+ SourceLocation LParenLoc = readSourceLocation();
+ OpenACCReductionOperator Op = readEnum<OpenACCReductionOperator>();
+ llvm::SmallVector<Expr *> VarList = readOpenACCVarList();
+ return OpenACCReductionClause::Create(getContext(), BeginLoc, LParenLoc, Op,
+ VarList, EndLoc);
+ }
case OpenACCClauseKind::Finalize:
case OpenACCClauseKind::IfPresent:
@@ -11937,7 +11944,6 @@ OpenACCClause *ASTRecordReader::readOpenACCClause() {
case OpenACCClauseKind::DeviceResident:
case OpenACCClauseKind::Host:
case OpenACCClauseKind::Link:
- case OpenACCClauseKind::Reduction:
case OpenACCClauseKind::Collapse:
case OpenACCClauseKind::Bind:
case OpenACCClauseKind::DeviceNum:
diff --git a/clang/lib/Serialization/ASTReaderDecl.cpp b/clang/lib/Serialization/ASTReaderDecl.cpp
index a6254b70560c..d7a9e31b477f 100644
--- a/clang/lib/Serialization/ASTReaderDecl.cpp
+++ b/clang/lib/Serialization/ASTReaderDecl.cpp
@@ -2695,7 +2695,8 @@ void ASTDeclReader::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
}
if (Record.readInt())
- D->setDefaultArgument(readTypeSourceInfo());
+ D->setDefaultArgument(Reader.getContext(),
+ Record.readTemplateArgumentLoc());
}
void ASTDeclReader::VisitNonTypeTemplateParmDecl(NonTypeTemplateParmDecl *D) {
diff --git a/clang/lib/Serialization/ASTReaderStmt.cpp b/clang/lib/Serialization/ASTReaderStmt.cpp
index 7d3930022a69..eac4faff2854 100644
--- a/clang/lib/Serialization/ASTReaderStmt.cpp
+++ b/clang/lib/Serialization/ASTReaderStmt.cpp
@@ -2177,6 +2177,7 @@ void ASTStmtReader::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
void ASTStmtReader::VisitPackIndexingExpr(PackIndexingExpr *E) {
VisitExpr(E);
E->TransformedExpressions = Record.readInt();
+ E->ExpandedToEmptyPack = Record.readInt();
E->EllipsisLoc = readSourceLocation();
E->RSquareLoc = readSourceLocation();
E->SubExprs[0] = Record.readStmt();
diff --git a/clang/lib/Serialization/ASTWriter.cpp b/clang/lib/Serialization/ASTWriter.cpp
index 1d6d96932ba2..00b0e4808321 100644
--- a/clang/lib/Serialization/ASTWriter.cpp
+++ b/clang/lib/Serialization/ASTWriter.cpp
@@ -7959,6 +7959,13 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) {
}
return;
}
+ case OpenACCClauseKind::Reduction: {
+ const auto *RC = cast<OpenACCReductionClause>(C);
+ writeSourceLocation(RC->getLParenLoc());
+ writeEnum(RC->getReductionOp());
+ writeOpenACCVarList(RC);
+ return;
+ }
case OpenACCClauseKind::Finalize:
case OpenACCClauseKind::IfPresent:
@@ -7975,7 +7982,6 @@ void ASTRecordWriter::writeOpenACCClause(const OpenACCClause *C) {
case OpenACCClauseKind::DeviceResident:
case OpenACCClauseKind::Host:
case OpenACCClauseKind::Link:
- case OpenACCClauseKind::Reduction:
case OpenACCClauseKind::Collapse:
case OpenACCClauseKind::Bind:
case OpenACCClauseKind::DeviceNum:
diff --git a/clang/lib/Serialization/ASTWriterDecl.cpp b/clang/lib/Serialization/ASTWriterDecl.cpp
index c2f1d1b44241..b2a214e935aa 100644
--- a/clang/lib/Serialization/ASTWriterDecl.cpp
+++ b/clang/lib/Serialization/ASTWriterDecl.cpp
@@ -1899,7 +1899,7 @@ void ASTDeclWriter::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
!D->defaultArgumentWasInherited();
Record.push_back(OwnsDefaultArg);
if (OwnsDefaultArg)
- Record.AddTypeSourceInfo(D->getDefaultArgumentInfo());
+ Record.AddTemplateArgumentLoc(D->getDefaultArgument());
if (!TC && !OwnsDefaultArg &&
D->getDeclContext() == D->getLexicalDeclContext() &&
diff --git a/clang/lib/Serialization/ASTWriterStmt.cpp b/clang/lib/Serialization/ASTWriterStmt.cpp
index 6f7c368ce9ca..a44852af97be 100644
--- a/clang/lib/Serialization/ASTWriterStmt.cpp
+++ b/clang/lib/Serialization/ASTWriterStmt.cpp
@@ -2157,11 +2157,11 @@ void ASTStmtWriter::VisitSizeOfPackExpr(SizeOfPackExpr *E) {
void ASTStmtWriter::VisitPackIndexingExpr(PackIndexingExpr *E) {
VisitExpr(E);
Record.push_back(E->TransformedExpressions);
+ Record.push_back(E->ExpandedToEmptyPack);
Record.AddSourceLocation(E->getEllipsisLoc());
Record.AddSourceLocation(E->getRSquareLoc());
Record.AddStmt(E->getPackIdExpression());
Record.AddStmt(E->getIndexExpr());
- Record.push_back(E->TransformedExpressions);
for (Expr *Sub : E->getExpressions())
Record.AddStmt(Sub);
Code = serialization::EXPR_PACK_INDEXING;
diff --git a/clang/test/AST/Interp/arrays.cpp b/clang/test/AST/Interp/arrays.cpp
index 929f25b95fa1..e936ec6dc894 100644
--- a/clang/test/AST/Interp/arrays.cpp
+++ b/clang/test/AST/Interp/arrays.cpp
@@ -26,6 +26,7 @@ static_assert(foo[2][2] == nullptr, "");
static_assert(foo[2][3] == &m, "");
static_assert(foo[2][4] == nullptr, "");
+constexpr int ZeroSizeArray[] = {};
constexpr int SomeInt[] = {1};
constexpr int getSomeInt() { return *SomeInt; }
@@ -595,3 +596,12 @@ int test_multiarray22() {
}
#endif
+
+namespace ArrayMemberAccess {
+ struct A {
+ int x;
+ };
+ void f(const A (&a)[]) {
+ bool cond = a->x;
+ }
+}
diff --git a/clang/test/AST/Interp/cxx11.cpp b/clang/test/AST/Interp/cxx11.cpp
index 993e3618a378..f06a5dd173cb 100644
--- a/clang/test/AST/Interp/cxx11.cpp
+++ b/clang/test/AST/Interp/cxx11.cpp
@@ -30,3 +30,19 @@ constexpr S s = { 5 };
constexpr const int *p = &s.m + 1;
constexpr const int *np2 = &(*(int(*)[4])nullptr)[0]; // ok
+
+constexpr int preDec(int x) { // both-error {{never produces a constant expression}}
+ return --x; // both-note {{subexpression}}
+}
+
+constexpr int postDec(int x) { // both-error {{never produces a constant expression}}
+ return x--; // both-note {{subexpression}}
+}
+
+constexpr int preInc(int x) { // both-error {{never produces a constant expression}}
+ return ++x; // both-note {{subexpression}}
+}
+
+constexpr int postInc(int x) { // both-error {{never produces a constant expression}}
+ return x++; // both-note {{subexpression}}
+}
diff --git a/clang/test/AST/Interp/functions.cpp b/clang/test/AST/Interp/functions.cpp
index a5bb9f1a19aa..e95ade8ef51b 100644
--- a/clang/test/AST/Interp/functions.cpp
+++ b/clang/test/AST/Interp/functions.cpp
@@ -617,3 +617,9 @@ namespace {
void bir [[clang::annotate("B", {1, 2, 3, 4})]] (); // both-error {{'annotate' attribute requires parameter 1 to be a constant expression}} \
// both-note {{subexpression not valid in a constant expression}}
}
+
+namespace FuncPtrParam {
+ void foo(int(&a)()) {
+ *a; // both-warning {{expression result unused}}
+ }
+}
diff --git a/clang/test/AST/Interp/records.cpp b/clang/test/AST/Interp/records.cpp
index 41be9b71a27f..3a5ecd291a56 100644
--- a/clang/test/AST/Interp/records.cpp
+++ b/clang/test/AST/Interp/records.cpp
@@ -1459,3 +1459,13 @@ namespace TemporaryWithInvalidDestructor {
// both-note {{in call to}}
#endif
}
+
+namespace IgnoredCtorWithZeroInit {
+ struct S {
+ int a;
+ };
+
+ bool get_status() {
+ return (S(), true);
+ }
+}
diff --git a/clang/test/AST/Interp/sycl.cpp b/clang/test/AST/Interp/sycl.cpp
new file mode 100644
index 000000000000..5c922eca5809
--- /dev/null
+++ b/clang/test/AST/Interp/sycl.cpp
@@ -0,0 +1,9 @@
+// RUN: %clang_cc1 %s -std=c++17 -triple x86_64-linux-gnu -fsycl-is-device -verify=both,ref -fsyntax-only -Wno-unused
+// RUN: %clang_cc1 %s -std=c++17 -triple x86_64-linux-gnu -fsycl-is-device -verify=both,expected -fsyntax-only -Wno-unused -fexperimental-new-constant-interpreter
+
+// both-no-diagnostics
+
+constexpr int a = 0;
+constexpr const char *a_name = __builtin_sycl_unique_stable_name(decltype(a));
+static_assert(__builtin_strcmp(a_name, "_ZTSKi") == 0);
+
diff --git a/clang/test/AST/ast-dump-ctad-alias.cpp b/clang/test/AST/ast-dump-ctad-alias.cpp
index 7fe6c05621ee..9382558393e4 100644
--- a/clang/test/AST/ast-dump-ctad-alias.cpp
+++ b/clang/test/AST/ast-dump-ctad-alias.cpp
@@ -48,3 +48,23 @@ Out2<double>::AInner t(1.0);
// CHECK-NEXT: | |-TemplateArgument type 'double'
// CHECK-NEXT: | | `-BuiltinType {{.*}} 'double'
// CHECK-NEXT: | `-ParmVarDecl {{.*}} 'double'
+
+template <typename... T1>
+struct Foo {
+ Foo(T1...);
+};
+
+template <typename...T2>
+using AFoo = Foo<T2...>;
+AFoo a(1, 2);
+// CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for AFoo> 'auto (type-parameter-0-0...) -> Foo<type-parameter-0-0...>'
+// CHECK-NEXT: | | `-ParmVarDecl {{.*}} 'type-parameter-0-0...' pack
+// CHECK-NEXT: | `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for AFoo> 'auto (int, int) -> Foo<int, int>' implicit_instantiation
+
+template <typename T>
+using BFoo = Foo<T, T>;
+BFoo b2(1.0, 2.0);
+// CHECK: |-CXXDeductionGuideDecl {{.*}} implicit <deduction guide for BFoo> 'auto (type-parameter-0-0, type-parameter-0-0) -> Foo<type-parameter-0-0, type-parameter-0-0>'
+// CHECK-NEXT: | | |-ParmVarDecl {{.*}} 'type-parameter-0-0'
+// CHECK-NEXT: | | `-ParmVarDecl {{.*}} 'type-parameter-0-0'
+// CHECK-NEXT: | `-CXXDeductionGuideDecl {{.*}} implicit used <deduction guide for BFoo> 'auto (double, double) -> Foo<double, double>' implicit_instantiation
diff --git a/clang/test/AST/ast-print-openacc-compute-construct.cpp b/clang/test/AST/ast-print-openacc-compute-construct.cpp
index 19965e749141..fe580c86ac8e 100644
--- a/clang/test/AST/ast-print-openacc-compute-construct.cpp
+++ b/clang/test/AST/ast-print-openacc-compute-construct.cpp
@@ -130,5 +130,33 @@ void foo() {
//CHECK: #pragma acc parallel device_type(SomeStructImpl)
#pragma acc parallel device_type (SomeStructImpl)
while(true);
+
+//CHECK: #pragma acc parallel reduction(+: iPtr)
+#pragma acc parallel reduction(+: iPtr)
+ while(true);
+//CHECK: #pragma acc parallel reduction(*: i)
+#pragma acc parallel reduction(*: i)
+ while(true);
+//CHECK: #pragma acc parallel reduction(max: SomeB)
+#pragma acc parallel reduction(max: SomeB)
+ while(true);
+//CHECK: #pragma acc parallel reduction(min: iPtr)
+#pragma acc parallel reduction(min: iPtr)
+ while(true);
+//CHECK: #pragma acc parallel reduction(&: i)
+#pragma acc parallel reduction(&: i)
+ while(true);
+//CHECK: #pragma acc parallel reduction(|: SomeB)
+#pragma acc parallel reduction(|: SomeB)
+ while(true);
+//CHECK: #pragma acc parallel reduction(^: iPtr)
+#pragma acc parallel reduction(^: iPtr)
+ while(true);
+//CHECK: #pragma acc parallel reduction(&&: i)
+#pragma acc parallel reduction(&&: i)
+ while(true);
+//CHECK: #pragma acc parallel reduction(||: SomeB)
+#pragma acc parallel reduction(||: SomeB)
+ while(true);
}
diff --git a/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp b/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
index be07ab0a48b3..0fa98ad101f6 100644
--- a/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
+++ b/clang/test/CXX/basic/basic.lookup/basic.lookup.qual/class.qual/p2.cpp
@@ -141,11 +141,15 @@ namespace InhCtor {
// ill-formed.
template<typename T>
struct S : T {
- struct U : S { // expected-note 6{{candidate}}
- using S::S;
- };
+ struct U; // expected-note 6{{candidate}}
using T::T;
};
+
+ template<typename T>
+ struct S<T>::U : S {
+ using S::S;
+ };
+
S<A>::U ua(0); // expected-error {{no match}}
S<B>::U ub(0); // expected-error {{no match}}
diff --git a/clang/test/CXX/class.derived/class.derived.general/p2.cpp b/clang/test/CXX/class.derived/class.derived.general/p2.cpp
new file mode 100644
index 000000000000..888d9cd7a939
--- /dev/null
+++ b/clang/test/CXX/class.derived/class.derived.general/p2.cpp
@@ -0,0 +1,116 @@
+// RUN: %clang_cc1 %s -fsyntax-only -verify
+
+namespace CurrentInstantiation {
+ template<typename T>
+ struct A0 { // expected-note 6{{definition of 'A0<T>' is not complete until the closing '}'}}
+ struct B0 : A0 { }; // expected-error {{base class has incomplete type}}
+
+ template<typename U>
+ struct B1 : A0 { }; // expected-error {{base class has incomplete type}}
+
+ struct B2;
+
+ template<typename U>
+ struct B3;
+
+ struct B4 { // expected-note 2{{definition of 'CurrentInstantiation::A0::B4' is not complete until the closing '}'}}
+ struct C0 : A0, B4 { }; // expected-error 2{{base class has incomplete type}}
+
+ template<typename V>
+ struct C1 : A0, B4 { }; // expected-error 2{{base class has incomplete type}}
+
+ struct C2;
+
+ template<typename V>
+ struct C3;
+ };
+
+ template<typename U>
+ struct B5 { // expected-note 2{{definition of 'B5<U>' is not complete until the closing '}'}}
+ struct C0 : A0, B5 { }; // expected-error 2{{base class has incomplete type}}
+
+ template<typename V>
+ struct C1 : A0, B5 { }; // expected-error 2{{base class has incomplete type}}
+
+ struct C2;
+
+ template<typename V>
+ struct C3;
+ };
+ };
+
+ template<typename T>
+ struct A0<T>::B2 : A0 { };
+
+ template<typename T>
+ template<typename U>
+ struct A0<T>::B3 : A0 { };
+
+ template<typename T>
+ struct A0<T>::B4::C2 : A0, B4 { };
+
+ template<typename T>
+ template<typename V>
+ struct A0<T>::B4::C3 : A0, B4 { };
+
+ template<typename T>
+ template<typename U>
+ struct A0<T>::B5<U>::C2 : A0, B5 { };
+
+ template<typename T>
+ template<typename U>
+ template<typename V>
+ struct A0<T>::B5<U>::C3 : A0, B5 { };
+
+ template<typename T>
+ struct A0<T*> { // expected-note 2{{definition of 'A0<type-parameter-0-0 *>' is not complete until the closing '}'}}
+ struct B0 : A0 { }; // expected-error {{base class has incomplete type}}
+
+ template<typename U>
+ struct B1 : A0 { }; // expected-error {{base class has incomplete type}}
+
+ struct B2;
+
+ template<typename U>
+ struct B3;
+ };
+
+ template<typename T>
+ struct A0<T*>::B2 : A0 { };
+
+ template<typename T>
+ template<typename U>
+ struct A0<T*>::B3 : A0 { };
+} // namespace CurrentInstantiation
+
+namespace MemberOfCurrentInstantiation {
+ template<typename T>
+ struct A0 {
+ struct B : B { }; // expected-error {{base class has incomplete type}}
+ // expected-note@-1 {{definition of 'MemberOfCurrentInstantiation::A0::B' is not complete until the closing '}'}}
+
+ template<typename U>
+ struct C : C<U> { }; // expected-error {{base class has incomplete type}}
+ // expected-note@-1 {{definition of 'C<U>' is not complete until the closing '}'}}
+ };
+
+ template<typename T>
+ struct A1 {
+ struct B; // expected-note {{definition of 'MemberOfCurrentInstantiation::A1<long>::B' is not complete until the closing '}'}}
+
+ struct C : B { }; // expected-error {{base class has incomplete type}}
+
+ struct B : C { }; // expected-note {{in instantiation of member class 'MemberOfCurrentInstantiation::A1<long>::C' requested here}}
+ };
+
+ template struct A1<long>; // expected-note {{in instantiation of member class 'MemberOfCurrentInstantiation::A1<long>::B' requested here}}
+
+ template<>
+ struct A1<short>::B {
+ static constexpr bool f() {
+ return true;
+ }
+ };
+
+ static_assert(A1<short>::C::f());
+} // namespace MemberOfCurrentInstantiation
diff --git a/clang/test/CXX/class/class.mfct/class.mfct.non-static/p3.cpp b/clang/test/CXX/class/class.mfct/class.mfct.non-static/p3.cpp
index 9116e7146f81..01fa923dd171 100644
--- a/clang/test/CXX/class/class.mfct/class.mfct.non-static/p3.cpp
+++ b/clang/test/CXX/class/class.mfct/class.mfct.non-static/p3.cpp
@@ -70,7 +70,7 @@ namespace test2 {
}
void test1() {
- B<T>::foo();
+ B<T>::foo(); // expected-error {{call to non-static member function without an object argument}}
}
static void test2() {
@@ -91,8 +91,95 @@ namespace test2 {
int test() {
A<int> a;
a.test0(); // no instantiation note here, decl is ill-formed
- a.test1();
+ a.test1(); // expected-note {{in instantiation}}
a.test2(); // expected-note {{in instantiation}}
a.test3(); // expected-note {{in instantiation}}
}
}
+
+namespace test3 {
+ struct A {
+ void f0();
+
+ template<typename T>
+ void f1();
+
+ static void f2();
+
+ template<typename T>
+ static void f3();
+
+ int x0;
+
+ static constexpr int x1 = 0;
+
+ template<typename T>
+ static constexpr int x2 = 0;
+ };
+
+ template<typename T>
+ struct B : T {
+ auto g0() -> decltype(T::f0());
+
+ auto g1() -> decltype(T::template f1<int>());
+
+ auto g2() -> decltype(T::f2());
+
+ auto g3() -> decltype(T::template f3<int>());
+
+ auto g4() -> decltype(T::x0);
+
+ auto g5() -> decltype(T::x1);
+
+ auto g6() -> decltype(T::template x2<int>);
+
+ decltype(T::f0()) g7(); // expected-error {{call to non-static member function without an object argument}}
+
+ decltype(T::template f1<int>()) g8(); // expected-error {{call to non-static member function without an object argument}}
+
+ decltype(T::f2()) g9();
+
+ decltype(T::template f3<int>()) g10();
+
+ decltype(T::x0) g11();
+
+ decltype(T::x1) g12();
+
+ decltype(T::template x2<int>) g13();
+ };
+
+ template struct B<A>; // expected-note {{in instantiation of}}
+
+ template<typename T>
+ struct C : T {
+ static auto g0() -> decltype(T::f0()); // expected-error {{'this' cannot be implicitly used in a static member function declaration}}
+
+ static auto g1() -> decltype(T::template f1<int>()); // expected-error {{'this' cannot be implicitly used in a static member function declaration}}
+
+ static auto g2() -> decltype(T::f2());
+
+ static auto g3() -> decltype(T::template f3<int>());
+
+ static auto g4() -> decltype(T::x0); // expected-error {{'this' cannot be implicitly used in a static member function declaration}}
+
+ static auto g5() -> decltype(T::x1);
+
+ static auto g6() -> decltype(T::template x2<int>);
+
+ static decltype(T::f0()) g7(); // expected-error {{call to non-static member function without an object argument}}
+
+ static decltype(T::template f1<int>()) g8(); // expected-error {{call to non-static member function without an object argument}}
+
+ static decltype(T::f2()) g9();
+
+ static decltype(T::template f3<int>()) g10();
+
+ static decltype(T::x0) g11();
+
+ static decltype(T::x1) g12();
+
+ static decltype(T::template x2<int>) g13();
+ };
+
+ template struct C<A>; // expected-note {{in instantiation of}}
+}
diff --git a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p1.cpp b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p1.cpp
index a28a5f91c477..9e890204c78b 100644
--- a/clang/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p1.cpp
+++ b/clang/test/CXX/dcl.dcl/dcl.spec/dcl.constexpr/p1.cpp
@@ -89,6 +89,9 @@ struct S {
template<typename T> constexpr T f(); // expected-warning 0-1{{C++14}} expected-note 0-1{{candidate}}
template <typename T>
T g() const; // expected-note-re {{candidate template ignored: could not match 'T (){{( __attribute__\(\(thiscall\)\))?}} const' against 'char (){{( __attribute__\(\(thiscall\)\))?}}'}}
+#if __cplusplus >= 201402L
+ // expected-note@-2 {{candidate template ignored: could not match 'T () const' against 'int ()'}}
+#endif
};
// explicit specialization can differ in constepxr
@@ -100,13 +103,17 @@ template <> notlit S::f() const { return notlit(); }
#if __cplusplus >= 201402L
// expected-error@-2 {{no function template matches}}
#endif
-template <> constexpr int S::g() { return 0; } // expected-note {{previous}}
+template <> constexpr int S::g() { return 0; }
#if __cplusplus < 201402L
// expected-warning@-2 {{C++14}}
+// expected-note@-3 {{previous}}
#else
-// expected-error@-4 {{does not match any declaration in 'S'}}
+// expected-error@-5 {{no function template matches function template specialization 'g'}}
+#endif
+template <> int S::g() const;
+#if __cplusplus < 201402L
+// expected-error@-2 {{non-constexpr declaration of 'g<int>' follows constexpr declaration}}
#endif
-template <> int S::g() const; // expected-error {{non-constexpr declaration of 'g<int>' follows constexpr declaration}}
// specializations can drop the 'constexpr' but not the implied 'const'.
template <> char S::g() { return 0; } // expected-error {{no function template matches}}
template <> double S::g() const { return 0; } // ok
@@ -154,3 +161,14 @@ namespace {
// FIXME: We should diagnose this prior to C++17.
const int &r = A::n;
}
+
+#if __cplusplus < 201402L
+namespace ImplicitConstexprDef {
+ struct A {
+ void f(); // expected-note {{member declaration does not match because it is not const qualified}}
+ };
+
+ constexpr void A::f() { } // expected-warning {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const' to avoid a change in behavior}}
+ // expected-error@-1 {{out-of-line definition of 'f' does not match any declaration in 'ImplicitConstexprDef::A'}}
+}
+#endif
diff --git a/clang/test/CXX/temp/temp.spec/temp.expl.spec/p12.cpp b/clang/test/CXX/temp/temp.spec/temp.expl.spec/p12.cpp
new file mode 100644
index 000000000000..2a5748908369
--- /dev/null
+++ b/clang/test/CXX/temp/temp.spec/temp.expl.spec/p12.cpp
@@ -0,0 +1,70 @@
+// RUN: %clang_cc1 -fsyntax-only -std=c++11 -verify=expected,cxx11 %s
+// RUN: %clang_cc1 -fsyntax-only -std=c++14 -verify=expected,since-cxx14 %s
+
+struct A {
+ template<typename T>
+ void f0();
+
+ template<>
+ constexpr void f0<short>(); // cxx11-error {{conflicting types for 'f0'}}
+ // cxx11-note@-1 {{previous declaration is here}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+ template<typename T>
+ void f1() const; // since-cxx14-note 2{{candidate template ignored: could not match 'void () const' against 'void ()'}}
+
+ template<>
+ constexpr void f1<short>(); // since-cxx14-error {{no function template matches function template specialization 'f1'}}
+ // cxx11-warning@-1 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+};
+
+template<>
+constexpr void A::f0<long>(); // cxx11-error {{conflicting types for 'f0'}}
+ // cxx11-note@-1 {{previous declaration is here}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+template<>
+constexpr void A::f1<long>(); // since-cxx14-error {{no function template matches function template specialization 'f1'}}
+ // cxx11-warning@-1 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+// FIXME: It's unclear whether [temp.expl.spec]p12 is intended to apply to
+// members of a class template explicitly specialized for an implicitly
+// instantiated specialization of that template.
+template<typename T>
+struct B {
+ void g0(); // since-cxx14-note {{previous declaration is here}}
+ // cxx11-note@-1 {{member declaration does not match because it is not const qualified}}
+
+ void g1() const; // since-cxx14-note {{member declaration does not match because it is const qualified}}
+ // cxx11-note@-1 {{previous declaration is here}}
+
+ template<typename U>
+ void h0(); // since-cxx14-note {{previous declaration is here}}
+
+ template<typename U>
+ void h1() const; // cxx11-note {{previous declaration is here}}
+};
+
+template<>
+constexpr void B<short>::g0(); // since-cxx14-error {{constexpr declaration of 'g0' follows non-constexpr declaration}}
+ // cxx11-error@-1 {{out-of-line declaration of 'g0' does not match any declaration in 'B<short>'}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+template<>
+constexpr void B<short>::g1(); // since-cxx14-error {{out-of-line declaration of 'g1' does not match any declaration in 'B<short>'}}
+ // cxx11-error@-1 {{constexpr declaration of 'g1' follows non-constexpr declaration}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+template<>
+template<typename U>
+constexpr void B<long>::h0(); // since-cxx14-error {{constexpr declaration of 'h0' follows non-constexpr declaration}}
+ // cxx11-error@-1 {{out-of-line declaration of 'h0' does not match any declaration in 'B<long>'}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+template<>
+template<typename U>
+constexpr void B<long>::h1(); // since-cxx14-error {{out-of-line declaration of 'h1' does not match any declaration in 'B<long>'}}
+ // cxx11-error@-1 {{constexpr declaration of 'h1' follows non-constexpr declaration}}
+ // cxx11-warning@-2 {{'constexpr' non-static member function will not be implicitly 'const' in C++14; add 'const'}}
+
+
diff --git a/clang/test/CodeGen/aarch64-byval-temp.c b/clang/test/CodeGen/aarch64-byval-temp.c
index e9e2586406e5..0384830c69a4 100644
--- a/clang/test/CodeGen/aarch64-byval-temp.c
+++ b/clang/test/CodeGen/aarch64-byval-temp.c
@@ -1,13 +1,14 @@
-// RUN: %clang_cc1 -emit-llvm -triple arm64-- -o - %s -O0 | FileCheck %s --check-prefix=CHECK-O0
-// RUN: %clang_cc1 -emit-llvm -disable-llvm-optzns -triple arm64-- -o - %s -O3 | FileCheck %s --check-prefix=CHECK-O3
+// RUN: %clang_cc1 -emit-llvm -triple arm64-- -fexperimental-max-bitint-width=1024 -o - %s -O0 | FileCheck %s --check-prefix=CHECK-O0
+// RUN: %clang_cc1 -emit-llvm -disable-llvm-optzns -fexperimental-max-bitint-width=1024 -triple arm64-- -o - %s -O3 | FileCheck %s --check-prefix=CHECK-O3
struct large {
void* pointers[8];
};
void pass_large(struct large);
+void pass_large_BitInt(_BitInt(129));
-// For arm64, we don't use byval to pass structs but instead we create
+// For arm64, we don't use byval to pass structs and _BitInt(>128) type, but instead we create
// temporary allocas.
//
// Make sure we generate the appropriate lifetime markers for the temporary
@@ -71,3 +72,41 @@ void example(void) {
// Mark the end of the lifetime of `l`.
// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 64, ptr %l)
// CHECK-O3-NEXT: ret void
+
+void example_BitInt(void) {
+ _BitInt(129) l = {0};
+ pass_large_BitInt(l);
+ pass_large_BitInt(l);
+}
+// CHECK-O0-LABEL: define dso_local void @example_BitInt(
+// CHECK-O0-NEXT: entry:
+// CHECK-O0-NEXT: [[L:%.*]] = alloca i129, align 16
+// CHECK-O0-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i129, align 16
+// CHECK-O0-NEXT: [[INDIRECT_ARG_TEMP1:%.*]] = alloca i129, align 16
+// CHECK-O0-NEXT: store i129 0, ptr [[L]], align 16
+// CHECK-O0-NEXT: [[TMP0:%.*]] = load i129, ptr [[L]], align 16
+// CHECK-O0-NEXT: store i129 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 16
+// CHECK-O0-NEXT: call void @pass_large_BitInt(ptr noundef [[INDIRECT_ARG_TEMP]])
+// CHECK-O0-NEXT: [[TMP1:%.*]] = load i129, ptr [[L]], align 16
+// CHECK-O0-NEXT: store i129 [[TMP1]], ptr [[INDIRECT_ARG_TEMP1]], align 16
+// CHECK-O0-NEXT: call void @pass_large_BitInt(ptr noundef [[INDIRECT_ARG_TEMP1]])
+// CHECK-O0-NEXT: ret void
+//
+// CHECK-O3-LABEL: define dso_local void @example_BitInt(
+// CHECK-O3-NEXT: entry:
+// CHECK-O3-NEXT: [[L:%.*]] = alloca i129, align 16
+// CHECK-O3-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca i129, align 16
+// CHECK-O3-NEXT: [[INDIRECT_ARG_TEMP1:%.*]] = alloca i129, align 16
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[L]])
+// CHECK-O3-NEXT: store i129 0, ptr [[L]], align 16, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-O3-NEXT: [[TMP0:%.*]] = load i129, ptr [[L]], align 16, !tbaa [[TBAA6]]
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[INDIRECT_ARG_TEMP]])
+// CHECK-O3-NEXT: store i129 [[TMP0]], ptr [[INDIRECT_ARG_TEMP]], align 16, !tbaa [[TBAA6]]
+// CHECK-O3-NEXT: call void @pass_large_BitInt(ptr noundef [[INDIRECT_ARG_TEMP]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[INDIRECT_ARG_TEMP]])
+// CHECK-O3-NEXT: [[TMP1:%.*]] = load i129, ptr [[L]], align 16, !tbaa [[TBAA6]]
+// CHECK-O3-NEXT: call void @llvm.lifetime.start.p0(i64 32, ptr [[INDIRECT_ARG_TEMP1]])
+// CHECK-O3-NEXT: store i129 [[TMP1]], ptr [[INDIRECT_ARG_TEMP1]], align 16, !tbaa [[TBAA6]]
+// CHECK-O3-NEXT: call void @pass_large_BitInt(ptr noundef [[INDIRECT_ARG_TEMP1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[INDIRECT_ARG_TEMP1]])
+// CHECK-O3-NEXT: call void @llvm.lifetime.end.p0(i64 32, ptr [[L]])
diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
index a4abe96cc08a..55e1ed393d84 100644
--- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
+++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.c
@@ -88,8 +88,10 @@ typedef svint8_t vec2 __attribute__((arm_sve_vector_bits(N)));
// CHECK-NEXT: entry:
// CHECK-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS,8)]] x i8>, align 16
// CHECK-NEXT: [[X:%.*]] = tail call <[[#div(VBITS,8)]] x i8> @llvm.vector.extract.v[[#div(VBITS,8)]]i8.nxv16i8(<vscale x 16 x i8> [[X_COERCE:%.*]], i64 0)
+// CHECK-NEXT: call void @llvm.lifetime.start.p0(i64 [[SIZE:[0-9]+]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: store <[[#div(VBITS,8)]] x i8> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6]]
// CHECK-NEXT: call void @f3(ptr noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
+// CHECK-NEXT: call void @llvm.lifetime.end.p0(i64 [[SIZE]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECK-NEXT: ret void
// CHECK128-LABEL: declare void @f3(<16 x i8> noundef)
diff --git a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
index 05587fd9e7fe..30ea73b63bce 100644
--- a/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
+++ b/clang/test/CodeGen/aarch64-sve-acle-__ARM_FEATURE_SVE_VECTOR_OPERATORS.cpp
@@ -73,8 +73,10 @@ typedef svint16_t vec2 __attribute__((arm_sve_vector_bits(N)));
// CHECK128-NEXT: ret void
// CHECKWIDE-NEXT: [[INDIRECT_ARG_TEMP:%.*]] = alloca <[[#div(VBITS, 16)]] x i16>, align 16
// CHECKWIDE-NEXT: [[X:%.*]] = tail call <[[#div(VBITS, 16)]] x i16> @llvm.vector.extract.v[[#div(VBITS, 16)]]i16.nxv8i16(<vscale x 8 x i16> [[X_COERCE:%.*]], i64 0)
+// CHECKWIDE-NEXT: call void @llvm.lifetime.start.p0(i64 [[SIZE:[0-9]+]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECKWIDE-NEXT: store <[[#div(VBITS, 16)]] x i16> [[X]], ptr [[INDIRECT_ARG_TEMP]], align 16, [[TBAA6:!tbaa !.*]]
// CHECKWIDE-NEXT: call void @_Z1fDv[[#div(VBITS, 16)]]_s(ptr noundef nonnull [[INDIRECT_ARG_TEMP]]) [[ATTR5:#.*]]
+// CHECKWIDE-NEXT: call void @llvm.lifetime.end.p0(i64 [[SIZE]], ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR6:[0-9]+]]
// CHECKWIDE-NEXT: ret void
void g(vec2 x) { f(x); } // OK
#endif
diff --git a/clang/test/CodeGen/aarch64-varargs.c b/clang/test/CodeGen/aarch64-varargs.c
index ee4e88eda4ef..8952d6980a8d 100644
--- a/clang/test/CodeGen/aarch64-varargs.c
+++ b/clang/test/CodeGen/aarch64-varargs.c
@@ -63,10 +63,8 @@ __int128 aligned_int(void) {
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load ptr, ptr @the_list
-// CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint ptr [[STACK]] to i64
-// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
-// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
-// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to ptr
+// CHECK: [[STACKINC:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[STACK]], i32 15
+// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9.]+]] = call ptr @llvm.ptrmask.p0.i64(ptr [[STACKINC]], i64 -16)
// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store ptr [[NEW_STACK]], ptr @the_list
// CHECK: br label %[[VAARG_END]]
@@ -377,10 +375,8 @@ underaligned_int128 underaligned_int128_test(void) {
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load ptr, ptr @the_list
-// CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint ptr [[STACK]] to i64
-// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
-// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
-// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to ptr
+// CHECK: [[STACKINC:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[STACK]], i32 15
+// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9.]+]] = call ptr @llvm.ptrmask.p0.i64(ptr [[STACKINC]], i64 -16)
// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store ptr [[NEW_STACK]], ptr @the_list
// CHECK: br label %[[VAARG_END]]
@@ -414,10 +410,8 @@ overaligned_int128 overaligned_int128_test(void) {
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load ptr, ptr @the_list
-// CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint ptr [[STACK]] to i64
-// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
-// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
-// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to ptr
+// CHECK: [[STACKINC:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[STACK]], i32 15
+// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9.]+]] = call ptr @llvm.ptrmask.p0.i64(ptr [[STACKINC]], i64 -16)
// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store ptr [[NEW_STACK]], ptr @the_list
// CHECK: br label %[[VAARG_END]]
@@ -688,10 +682,8 @@ overaligned_int_struct_member overaligned_int_struct_member_test(void) {
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load ptr, ptr @the_list
-// CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint ptr [[STACK]] to i64
-// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
-// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
-// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to ptr
+// CHECK: [[STACKINC:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[STACK]], i32 15
+// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9.]+]] = call ptr @llvm.ptrmask.p0.i64(ptr [[STACKINC]], i64 -16)
// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store ptr [[NEW_STACK]], ptr @the_list
// CHECK: br label %[[VAARG_END]]
@@ -756,10 +748,8 @@ overaligned_long_long_struct_member overaligned_long_long_struct_member_test(voi
// CHECK: [[VAARG_ON_STACK]]
// CHECK: [[STACK:%[a-z_0-9]+]] = load ptr, ptr @the_list
-// CHECK: [[STACKINT:%[a-z_0-9]+]] = ptrtoint ptr [[STACK]] to i64
-// CHECK: [[ALIGN_STACK:%[a-z_0-9]+]] = add i64 [[STACKINT]], 15
-// CHECK: [[ALIGNED_STACK_INT:%[a-z_0-9]+]] = and i64 [[ALIGN_STACK]], -16
-// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9]+]] = inttoptr i64 [[ALIGNED_STACK_INT]] to ptr
+// CHECK: [[STACKINC:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[STACK]], i32 15
+// CHECK: [[ALIGNED_STACK_PTR:%[a-z_0-9.]+]] = call ptr @llvm.ptrmask.p0.i64(ptr [[STACKINC]], i64 -16)
// CHECK: [[NEW_STACK:%[a-z_0-9]+]] = getelementptr inbounds i8, ptr [[ALIGNED_STACK_PTR]], i64 16
// CHECK: store ptr [[NEW_STACK]], ptr @the_list
// CHECK: br label %[[VAARG_END]]
diff --git a/clang/test/CodeGen/darwin-target-variant.c b/clang/test/CodeGen/darwin-target-variant.c
index 36caaaec1bdb..9f4b36a790db 100644
--- a/clang/test/CodeGen/darwin-target-variant.c
+++ b/clang/test/CodeGen/darwin-target-variant.c
@@ -2,5 +2,5 @@
// CHECK: !llvm.module.flags = !{!0, !1, !2
// CHECK: !0 = !{i32 2, !"SDK Version", [2 x i32] [i32 11, i32 1]}
-// CHECK: !1 = !{i32 4, !"darwin.target_variant.triple", !"x86_64-apple-ios14-macabi"}
+// CHECK: !1 = !{i32 2, !"darwin.target_variant.triple", !"x86_64-apple-ios14-macabi"}
// CHECK: !2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [i32 14, i32 1]}
diff --git a/clang/test/CodeGen/fat-lto-objects.c b/clang/test/CodeGen/fat-lto-objects.c
index b50567c024fc..36a73684e7bf 100644
--- a/clang/test/CodeGen/fat-lto-objects.c
+++ b/clang/test/CodeGen/fat-lto-objects.c
@@ -62,7 +62,7 @@
// ELF: .llvm.lto
-// ASM: .section .llvm.lto,"e",@progbits
+// ASM: .section .llvm.lto,"e",@llvm_lto
// ASM-NEXT: .Lllvm.embedded.object:
// ASM-NEXT: .asciz "BC
// ASM-NEXT: .size .Lllvm.embedded.object
diff --git a/clang/test/CodeGen/functions.c b/clang/test/CodeGen/functions.c
index 1bbaa80d653c..0cc999aa4916 100644
--- a/clang/test/CodeGen/functions.c
+++ b/clang/test/CodeGen/functions.c
@@ -61,3 +61,15 @@ static void test9_helper(void) {}
void test9(void) {
(void) test9_helper;
}
+
+// PR88917: don't crash
+int b();
+
+int main() {
+ return b(b);
+ // CHECK: call i32 @b(ptr noundef @b)
+}
+int b(int (*f)()){
+ return 0;
+}
+// CHECK-LABEL: define{{.*}} i32 @b(ptr noundef %f)
diff --git a/clang/test/CodeGen/nofpclass.c b/clang/test/CodeGen/nofpclass.c
index dd90d02f7759..fc4c64f9b921 100644
--- a/clang/test/CodeGen/nofpclass.c
+++ b/clang/test/CodeGen/nofpclass.c
@@ -172,7 +172,7 @@ double2 defined_func_v2f64(double2 a, double2 b, double2 c) {
// CLFINITEONLY-LABEL: define dso_local nofpclass(nan inf) float @call_extern_func
// CLFINITEONLY-SAME: (float noundef nofpclass(nan inf) [[A:%.*]], double noundef nofpclass(nan inf) [[B:%.*]], half noundef nofpclass(nan inf) [[C:%.*]]) local_unnamed_addr #[[ATTR3:[0-9]+]] {
// CLFINITEONLY-NEXT: entry:
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float @extern_func(float noundef nofpclass(nan inf) [[A]], double noundef nofpclass(nan inf) [[B]], half noundef nofpclass(nan inf) [[C]]) #[[ATTR10:[0-9]+]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float @extern_func(float noundef nofpclass(nan inf) [[A]], double noundef nofpclass(nan inf) [[B]], half noundef nofpclass(nan inf) [[C]]) #[[ATTR11:[0-9]+]]
// CLFINITEONLY-NEXT: ret float [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -249,7 +249,7 @@ float call_extern_func(float a, double b, _Float16 c) {
// CLFINITEONLY-LABEL: define dso_local nofpclass(nan inf) double @call_extern_func_vec
// CLFINITEONLY-SAME: (double noundef nofpclass(nan inf) [[A_COERCE:%.*]], <2 x double> noundef nofpclass(nan inf) [[B:%.*]], i32 noundef [[C_COERCE:%.*]]) local_unnamed_addr #[[ATTR5:[0-9]+]] {
// CLFINITEONLY-NEXT: entry:
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) double @extern_func_vec(double noundef nofpclass(nan inf) [[A_COERCE]], <2 x double> noundef nofpclass(nan inf) [[B]], i32 noundef [[C_COERCE]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) double @extern_func_vec(double noundef nofpclass(nan inf) [[A_COERCE]], <2 x double> noundef nofpclass(nan inf) [[B]], i32 noundef [[C_COERCE]]) #[[ATTR11]]
// CLFINITEONLY-NEXT: ret double [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -389,7 +389,7 @@ float2 call_extern_func_vec(float2 a, double2 b, half2 c) {
// CLFINITEONLY-LABEL: define dso_local nofpclass(nan inf) <2 x float> @defined_complex_func
// CLFINITEONLY-SAME: (<2 x float> noundef nofpclass(nan inf) [[A_COERCE:%.*]], double noundef nofpclass(nan inf) [[B_COERCE0:%.*]], double noundef nofpclass(nan inf) [[B_COERCE1:%.*]], <2 x half> noundef nofpclass(nan inf) [[C_COERCE:%.*]]) local_unnamed_addr #[[ATTR6:[0-9]+]] {
// CLFINITEONLY-NEXT: entry:
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <2 x float> @extern_complex(<2 x float> noundef nofpclass(nan inf) [[A_COERCE]], double noundef nofpclass(nan inf) [[B_COERCE0]], double noundef nofpclass(nan inf) [[B_COERCE1]], <2 x half> noundef nofpclass(nan inf) [[C_COERCE]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <2 x float> @extern_complex(<2 x float> noundef nofpclass(nan inf) [[A_COERCE]], double noundef nofpclass(nan inf) [[B_COERCE0]], double noundef nofpclass(nan inf) [[B_COERCE1]], <2 x half> noundef nofpclass(nan inf) [[C_COERCE]]) #[[ATTR11]]
// CLFINITEONLY-NEXT: ret <2 x float> [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -927,12 +927,14 @@ _Complex _Float16 defined_complex_func_f16_ret(_Complex _Float16 c) {
// CLFINITEONLY-NEXT: [[CF16_REAL:%.*]] = load half, ptr [[CF16]], align 8
// CLFINITEONLY-NEXT: [[CF16_IMAGP:%.*]] = getelementptr inbounds i8, ptr [[CF16]], i64 2
// CLFINITEONLY-NEXT: [[CF16_IMAG:%.*]] = load half, ptr [[CF16_IMAGP]], align 2
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR12:[0-9]+]]
// CLFINITEONLY-NEXT: [[INDIRECT_ARG_TEMP_IMAGP:%.*]] = getelementptr inbounds i8, ptr [[INDIRECT_ARG_TEMP]], i64 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE0]], ptr [[INDIRECT_ARG_TEMP]], align 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE1]], ptr [[INDIRECT_ARG_TEMP_IMAGP]], align 8
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x half> poison, half [[CF16_REAL]], i64 0
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_2_VEC_INSERT:%.*]] = insertelement <2 x half> [[COERCE5_SROA_0_0_VEC_INSERT]], half [[CF16_IMAG]], i64 1
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) @variadic(float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[INDIRECT_ARG_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) @variadic(float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[INDIRECT_ARG_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR11]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: ret float [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -1178,12 +1180,14 @@ float call_variadic(float f32, double f64, _Float16 f16,
// CLFINITEONLY-NEXT: [[CF16_REAL:%.*]] = load half, ptr [[CF16]], align 8
// CLFINITEONLY-NEXT: [[CF16_IMAGP:%.*]] = getelementptr inbounds i8, ptr [[CF16]], i64 2
// CLFINITEONLY-NEXT: [[CF16_IMAG:%.*]] = load half, ptr [[CF16_IMAGP]], align 2
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.start.p0(i64 16, ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: [[INDIRECT_ARG_TEMP_IMAGP:%.*]] = getelementptr inbounds i8, ptr [[INDIRECT_ARG_TEMP]], i64 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE0]], ptr [[INDIRECT_ARG_TEMP]], align 8
// CLFINITEONLY-NEXT: store double [[CF64_COERCE1]], ptr [[INDIRECT_ARG_TEMP_IMAGP]], align 8
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_0_VEC_INSERT:%.*]] = insertelement <2 x half> poison, half [[CF16_REAL]], i64 0
// CLFINITEONLY-NEXT: [[COERCE5_SROA_0_2_VEC_INSERT:%.*]] = insertelement <2 x half> [[COERCE5_SROA_0_0_VEC_INSERT]], half [[CF16_IMAG]], i64 1
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) [[FPTR]](float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[INDIRECT_ARG_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) float (float, ...) [[FPTR]](float noundef nofpclass(nan inf) [[F32]], double noundef nofpclass(nan inf) [[CONV]], double noundef nofpclass(nan inf) [[F64]], half noundef nofpclass(nan inf) [[F16]], double noundef nofpclass(nan inf) [[V2F32_COERCE]], <2 x double> noundef nofpclass(nan inf) [[V2F64]], i32 noundef [[V2F16_COERCE]], <2 x float> noundef nofpclass(nan inf) [[CF32_COERCE]], ptr noundef nonnull byval({ double, double }) align 8 [[INDIRECT_ARG_TEMP]], <2 x half> noundef nofpclass(nan inf) [[COERCE5_SROA_0_2_VEC_INSERT]]) #[[ATTR11]]
+// CLFINITEONLY-NEXT: call void @llvm.lifetime.end.p0(i64 16, ptr nonnull [[INDIRECT_ARG_TEMP]]) #[[ATTR12]]
// CLFINITEONLY-NEXT: ret float [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -1364,9 +1368,9 @@ extern __m256d extern_m256d(__m256d, ...);
//
// CLFINITEONLY: Function Attrs: convergent norecurse nounwind
// CLFINITEONLY-LABEL: define dso_local nofpclass(nan inf) <4 x double> @call_m256d
-// CLFINITEONLY-SAME: (<4 x double> noundef nofpclass(nan inf) [[X:%.*]]) local_unnamed_addr #[[ATTR8:[0-9]+]] {
+// CLFINITEONLY-SAME: (<4 x double> noundef nofpclass(nan inf) [[X:%.*]]) local_unnamed_addr #[[ATTR9:[0-9]+]] {
// CLFINITEONLY-NEXT: entry:
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <4 x double> (<4 x double>, ...) @extern_m256d(<4 x double> noundef nofpclass(nan inf) [[X]], <4 x double> noundef nofpclass(nan inf) [[X]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <4 x double> (<4 x double>, ...) @extern_m256d(<4 x double> noundef nofpclass(nan inf) [[X]], <4 x double> noundef nofpclass(nan inf) [[X]]) #[[ATTR11]]
// CLFINITEONLY-NEXT: ret <4 x double> [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
@@ -1407,9 +1411,9 @@ __m256d call_m256d(__m256d x) {
//
// CLFINITEONLY: Function Attrs: convergent norecurse nounwind
// CLFINITEONLY-LABEL: define dso_local nofpclass(nan inf) <25 x double> @call_matrix
-// CLFINITEONLY-SAME: (<25 x double> noundef nofpclass(nan inf) [[X:%.*]]) local_unnamed_addr #[[ATTR9:[0-9]+]] {
+// CLFINITEONLY-SAME: (<25 x double> noundef nofpclass(nan inf) [[X:%.*]]) local_unnamed_addr #[[ATTR10:[0-9]+]] {
// CLFINITEONLY-NEXT: entry:
-// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <25 x double> @extern_matrix(<25 x double> noundef nofpclass(nan inf) [[X]]) #[[ATTR10]]
+// CLFINITEONLY-NEXT: [[CALL:%.*]] = tail call nnan ninf nofpclass(nan inf) <25 x double> @extern_matrix(<25 x double> noundef nofpclass(nan inf) [[X]]) #[[ATTR11]]
// CLFINITEONLY-NEXT: ret <25 x double> [[CALL]]
//
// NONANS: Function Attrs: noinline nounwind optnone
diff --git a/clang/test/CodeGenCXX/wasm-eh.cpp b/clang/test/CodeGenCXX/wasm-eh.cpp
index 1b17498ba9ce..9dc15633bfed 100644
--- a/clang/test/CodeGenCXX/wasm-eh.cpp
+++ b/clang/test/CodeGenCXX/wasm-eh.cpp
@@ -1,4 +1,8 @@
// REQUIRES: webassembly-registered-target
+
+// RUN: %clang -E -dM %s -target wasm32-unknown-unknown -fwasm-exceptions | FileCheck %s -check-prefix PREPROCESSOR
+// PREPROCESSOR: #define __WASM_EXCEPTIONS__ 1
+
// RUN: %clang_cc1 %s -triple wasm32-unknown-unknown -fms-extensions -fexceptions -fcxx-exceptions -mllvm -wasm-enable-eh -exception-model=wasm -target-feature +exception-handling -emit-llvm -o - -std=c++11 | FileCheck %s
// RUN: %clang_cc1 %s -triple wasm64-unknown-unknown -fms-extensions -fexceptions -fcxx-exceptions -mllvm -wasm-enable-eh -exception-model=wasm -target-feature +exception-handling -emit-llvm -o - -std=c++11 | FileCheck %s
diff --git a/clang/test/CodeGenCXX/windows-implicit-dllexport-template-specialization.cpp b/clang/test/CodeGenCXX/windows-implicit-dllexport-template-specialization.cpp
index 3a5693275824..d281826ee70f 100644
--- a/clang/test/CodeGenCXX/windows-implicit-dllexport-template-specialization.cpp
+++ b/clang/test/CodeGenCXX/windows-implicit-dllexport-template-specialization.cpp
@@ -1,7 +1,7 @@
// RUN: %clang_cc1 -std=c++11 -triple i686-windows -fdeclspec -emit-llvm %s -o - | FileCheck %s -check-prefix CHECK-MS
-// RUN: %clang_cc1 -std=c++11 -triple i686-windows-itanium -fdeclspec -emit-llvm %s -o - | FileCheck %s -check-prefix CHECK-IA
-// RUN: %clang_cc1 -std=c++11 -triple x86_64-scei-ps4 -fdeclspec -emit-llvm %s -o - | FileCheck %s -check-prefix CHECK-PS4
-// RUN: %clang_cc1 -std=c++11 -triple x86_64-sie-ps5 -fdeclspec -emit-llvm %s -o - | FileCheck %s -check-prefix CHECK-PS4
+// RUN: %clang_cc1 -std=c++11 -triple i686-windows-itanium -fdeclspec -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -std=c++11 -triple x86_64-scei-ps4 -fdeclspec -emit-llvm %s -o - | FileCheck %s
+// RUN: %clang_cc1 -std=c++11 -triple x86_64-sie-ps5 -fdeclspec -emit-llvm %s -o - | FileCheck %s
template <typename>
struct s {};
@@ -15,8 +15,5 @@ template class __declspec(dllexport) t<char>;
// CHECK-MS: dllexport {{.*}} @"??4?$t@D@@QAEAAV0@ABV0@@Z"
// CHECK-MS: dllexport {{.*}} @"??4?$s@D@@QAEAAU0@ABU0@@Z"
-// CHECK-IA: dllexport {{.*}} @_ZN1tIcEaSERKS0_
-// CHECK-IA: dllexport {{.*}} @_ZN1sIcEaSERKS0_
-
-// CHECK-PS4-NOT: @_ZN1tIcEaSERKS0_
-// CHECK-PS4-NOT: @_ZN1sIcEaSERKS0_
+// CHECK: dllexport {{.*}} @_ZN1tIcEaSERKS0_
+// CHECK: dllexport {{.*}} @_ZN1sIcEaSERKS0_
diff --git a/clang/test/CodeGenCXX/windows-itanium-dllexport.cpp b/clang/test/CodeGenCXX/windows-itanium-dllexport.cpp
index c09fa30d761a..334cebff99da 100644
--- a/clang/test/CodeGenCXX/windows-itanium-dllexport.cpp
+++ b/clang/test/CodeGenCXX/windows-itanium-dllexport.cpp
@@ -1,6 +1,6 @@
// RUN: %clang_cc1 -emit-llvm -triple i686-windows-itanium -fdeclspec %s -o - | FileCheck %s --check-prefixes=CHECK,WI
-// RUN: %clang_cc1 -emit-llvm -triple x86_64-scei-ps4 -fdeclspec %s -o - | FileCheck %s --check-prefixes=CHECK,PS4
-// RUN: %clang_cc1 -emit-llvm -triple x86_64-sie-ps5 -fdeclspec %s -o - | FileCheck %s --check-prefixes=CHECK,PS4
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-scei-ps4 -fdeclspec %s -o - | FileCheck %s --check-prefixes=CHECK,PS
+// RUN: %clang_cc1 -emit-llvm -triple x86_64-sie-ps5 -fdeclspec %s -o - | FileCheck %s --check-prefixes=CHECK,PS
#define JOIN2(x, y) x##y
#define JOIN(x, y) JOIN2(x, y)
@@ -27,18 +27,14 @@ template class __declspec(dllexport) c<int>;
extern template class c<char>;
template class __declspec(dllexport) c<char>;
-// WI: define {{.*}} dllexport {{.*}} @_ZN1cIcEaSERKS0_
-// WI: define {{.*}} dllexport {{.*}} @_ZN1cIcE1fEv
-// PS4-NOT: @_ZN1cIcEaSERKS0_
-// PS4: define weak_odr void @_ZN1cIcE1fEv
+// CHECK: define {{.*}} dllexport {{.*}} @_ZN1cIcEaSERKS0_
+// CHECK: define {{.*}} dllexport {{.*}} @_ZN1cIcE1fEv
c<double> g;
template class __declspec(dllexport) c<double>;
-// WI: define {{.*}} dllexport {{.*}} @_ZN1cIdEaSERKS0_
-// WI: define {{.*}} dllexport {{.*}} @_ZN1cIdE1fEv
-// PS4-NOT: @_ZN1cIdEaSERKS0_
-// PS4: define weak_odr void @_ZN1cIdE1fEv
+// CHECK: define {{.*}} dllexport {{.*}} @_ZN1cIdEaSERKS0_
+// CHECK: define {{.*}} dllexport {{.*}} @_ZN1cIdE1fEv
template <class T>
struct outer {
@@ -59,4 +55,4 @@ USEMEMFUNC(outer<char>::inner, f)
// CHECK-DAG: declare dllimport {{.*}} @_ZN5outerIcE1fEv
// WI-DAG: define {{.*}} @_ZN5outerIcE5inner1fEv
-// PS4-DAG: declare {{.*}} @_ZN5outerIcE5inner1fEv
+// PS-DAG: declare {{.*}} @_ZN5outerIcE5inner1fEv
diff --git a/clang/test/CodeGenOpenCLCXX/array-type-infinite-loop.clcpp b/clang/test/CodeGenOpenCLCXX/array-type-infinite-loop.clcpp
new file mode 100644
index 000000000000..db9d7eb3281f
--- /dev/null
+++ b/clang/test/CodeGenOpenCLCXX/array-type-infinite-loop.clcpp
@@ -0,0 +1,25 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 4
+//RUN: %clang_cc1 %s -triple spir -emit-llvm -O1 -o - | FileCheck %s
+
+// CHECK-LABEL: define dso_local spir_kernel void @test(
+// CHECK-SAME: ptr addrspace(1) nocapture noundef readonly align 8 [[IN:%.*]], ptr addrspace(1) nocapture noundef writeonly align 8 [[OUT:%.*]]) local_unnamed_addr #[[ATTR0:[0-9]+]] !kernel_arg_addr_space [[META3:![0-9]+]] !kernel_arg_access_qual [[META4:![0-9]+]] !kernel_arg_type [[META5:![0-9]+]] !kernel_arg_base_type [[META5]] !kernel_arg_type_qual [[META6:![0-9]+]] {
+// CHECK-NEXT: entry:
+// CHECK-NEXT: [[ARRAYIDX1:%.*]] = getelementptr inbounds i8, ptr addrspace(1) [[IN]], i32 8
+// CHECK-NEXT: [[TMP0:%.*]] = load i64, ptr addrspace(1) [[ARRAYIDX1]], align 8, !tbaa [[TBAA7:![0-9]+]]
+// CHECK-NEXT: store i64 [[TMP0]], ptr addrspace(1) [[OUT]], align 8, !tbaa [[TBAA7]]
+// CHECK-NEXT: ret void
+//
+__kernel void test(__global long *In, __global long *Out) {
+ long m[4] = { In[0], In[1], 0, 0 };
+ *Out = m[1];
+}
+//.
+// CHECK: [[META3]] = !{i32 1, i32 1}
+// CHECK: [[META4]] = !{!"none", !"none"}
+// CHECK: [[META5]] = !{!"long*", !"long*"}
+// CHECK: [[META6]] = !{!"", !""}
+// CHECK: [[TBAA7]] = !{[[META8:![0-9]+]], [[META8]], i64 0}
+// CHECK: [[META8]] = !{!"long", [[META9:![0-9]+]], i64 0}
+// CHECK: [[META9]] = !{!"omnipotent char", [[META10:![0-9]+]], i64 0}
+// CHECK: [[META10]] = !{!"Simple C++ TBAA"}
+//.
diff --git a/clang/test/CoverageMapping/mcdc-system-headers.cpp b/clang/test/CoverageMapping/mcdc-system-headers.cpp
index a8a3ddbb506f..4dfbb17c2bba 100644
--- a/clang/test/CoverageMapping/mcdc-system-headers.cpp
+++ b/clang/test/CoverageMapping/mcdc-system-headers.cpp
@@ -1,5 +1,5 @@
-// RUN: %clang_cc1 -std=c++11 -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -fcoverage-mcdc -mllvm -system-headers-coverage -emit-llvm-only -o - %s | FileCheck %s --check-prefixes=CHECK,W_SYS
-// RUN: %clang_cc1 -std=c++11 -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -fcoverage-mcdc -emit-llvm-only -o - %s | FileCheck %s --check-prefixes=CHECK,X_SYS
+// RUN: %clang_cc1 -std=c++11 -triple %itanium_abi_triple -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -fcoverage-mcdc -mllvm -system-headers-coverage -emit-llvm-only -o - %s | FileCheck %s --check-prefixes=CHECK,W_SYS
+// RUN: %clang_cc1 -std=c++11 -triple %itanium_abi_triple -fprofile-instrument=clang -fcoverage-mapping -dump-coverage-mapping -fcoverage-mcdc -emit-llvm-only -o - %s | FileCheck %s --check-prefixes=CHECK,X_SYS
#ifdef IS_SYSHEADER
diff --git a/clang/test/Driver/tocdata-cc1.c b/clang/test/Driver/tocdata-cc1.c
index fe0d97ea02db..e00383deecef 100644
--- a/clang/test/Driver/tocdata-cc1.c
+++ b/clang/test/Driver/tocdata-cc1.c
@@ -1,16 +1,13 @@
// RUN: %clang -### --target=powerpc-ibm-aix-xcoff -mcmodel=medium -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-NOTOC %s
+// RUN: | FileCheck %s
// RUN: %clang -### --target=powerpc-ibm-aix-xcoff -mcmodel=large -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-NOTOC %s
+// RUN: | FileCheck %s
// RUN: %clang -### --target=powerpc-ibm-aix-xcoff -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-TOC %s
+// RUN: | FileCheck %s
// RUN: %clang -### --target=powerpc64-ibm-aix-xcoff -mcmodel=medium -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-NOTOC %s
+// RUN: | FileCheck %s
// RUN: %clang -### --target=powerpc64-ibm-aix-xcoff -mcmodel=large -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-NOTOC %s
+// RUN: | FileCheck %s
// RUN: %clang -### --target=powerpc64-ibm-aix-xcoff -mtocdata %s 2>&1 \
-// RUN: | FileCheck -check-prefix=CHECK-TOC %s
-// CHECK-NOTOC: warning: ignoring '-mtocdata' as it is only supported for -mcmodel=small
-// CHECK-NOTOC-NOT: "-cc1"{{.*}}" "-mtocdata"
-// CHECK-TOC: "-cc1"{{.*}}" "-mtocdata"
-// CHECK-TOC-NOT: warning: ignoring '-mtocdata' as it is only supported for -mcmodel=small
+// RUN: | FileCheck %s
+// CHECK: "-cc1"{{.*}}" "-mtocdata"
diff --git a/clang/test/InstallAPI/alias_list.test b/clang/test/InstallAPI/alias_list.test
index 3e12221e088c..aba7e395cca9 100644
--- a/clang/test/InstallAPI/alias_list.test
+++ b/clang/test/InstallAPI/alias_list.test
@@ -23,7 +23,7 @@
; RUN: -o %t/AliasList.tbd 2>&1 | FileCheck -allow-empty %s \
; RUN: --check-prefix=INVALID
-; INVALID: error: could not read alias list {{.*}} missing alias for: _hidden
+; INVALID: error: could not read symbol alias input list {{.*}}invalid.txt': invalid input format: missing alias for: _hidden
;--- Frameworks/AliasList.framework/Headers/AliasList.h
// simple alias from one symbol to another.
diff --git a/clang/test/InstallAPI/exclusive-passes-2.test b/clang/test/InstallAPI/exclusive-passes-2.test
index 3e7a6d777d5a..132b27df383c 100644
--- a/clang/test/InstallAPI/exclusive-passes-2.test
+++ b/clang/test/InstallAPI/exclusive-passes-2.test
@@ -11,6 +11,15 @@
; RUN: -DFoo -XApple -DDarwin=1 -XElf -DNONDarwin=1 2>&1 | FileCheck -allow-empty %s
; RUN: llvm-readtapi --compare %t/output.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+; RUN: clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib \
+; RUN: -current_version 1 -compatibility_version 1 \
+; RUN: -I%S/Inputs/LibFoo/usr/include -dynamiclib \
+; RUN: -extra-public-header %S/Inputs/LibFoo/usr/include/foo.h \
+; RUN: -o %t/output2.tbd \
+; RUN: -DFoo -optionlist %t/options.json 2>&1 | FileCheck -allow-empty %s
+; RUN: llvm-readtapi --compare %t/output.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+
; CHECK-NOT: error
; CHECK-NOT: warning
diff --git a/clang/test/InstallAPI/exclusive-passes-3.test b/clang/test/InstallAPI/exclusive-passes-3.test
new file mode 100644
index 000000000000..3a9b64c9f7b8
--- /dev/null
+++ b/clang/test/InstallAPI/exclusive-passes-3.test
@@ -0,0 +1,86 @@
+; RUN: rm -rf %t
+; RUN: split-file %s %t
+
+// "Apple" label has split options between the optionlist & command line.
+; RUN: clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib -current_version 1 \
+; RUN: -compatibility_version 1 \
+; RUN: -extra-public-header %t/usr/include/opts.h \
+; RUN: -optionlist %t/options.json -XApple -DCLI_OPT=1 \
+; RUN: -I%S/Inputs/LibFoo/usr/include \
+; RUN: -I%t/usr/include -dynamiclib -o %t/output.tbd 2>&1 | FileCheck %s -allow-empty
+; RUN: llvm-readtapi --compare %t/output.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+
+// Validate duplicated options give same result.
+; RUN: clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib -current_version 1 \
+; RUN: -compatibility_version 1 \
+; RUN: -extra-public-header %t/usr/include/opts.h \
+; RUN: -optionlist %t/options.json -XApple -DCLI_OPT=1 \
+; RUN: -I%S/Inputs/LibFoo/usr/include \
+; RUN: -XApple -DDarwin -XElf -DNONDarwin \
+; RUN: -I%t/usr/include -dynamiclib -o %t/output2.tbd 2>&1 | FileCheck %s -allow-empty
+; RUN: llvm-readtapi --compare %t/output2.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+
+; CHECK-NOT: error
+; CHECK-NOT: warning
+
+;--- usr/include/opts.h
+#ifndef OPTS_H
+#define OPTS_H
+#include <macro_defs.h>
+
+#if defined(CLI_OPT) && CLI_OPT
+ #define SUFFIX "$final"
+#else
+ #define SUFFIX
+#endif
+
+
+#define __STRING(x) #x
+#define PLATFORM_ALIAS(sym) __asm("_" __STRING(sym) DARWIN LINUX SUFFIX)
+extern int foo() PLATFORM_ALIAS(foo);
+
+#endif
+
+;--- expected.tbd
+{
+ "main_library": {
+ "exported_symbols": [
+ {
+ "text": {
+ "global": [
+ "_foo$darwin$final",
+ "_foo$linux",
+ "_foo"
+ ]
+ }
+ }
+ ],
+ "flags": [
+ {
+ "attributes": [
+ "not_app_extension_safe"
+ ]
+ }
+ ],
+ "install_names": [
+ {
+ "name": "@rpath/libfoo.dylib"
+ }
+ ],
+ "target_info": [
+ {
+ "min_deployment": "12",
+ "target": "arm64-macos"
+ }
+ ]
+ },
+ "tapi_tbd_version": 5
+}
+
+//--- options.json
+{
+ "Apple" : ["-DDarwin=1"],
+ "Elf" : ["-DNONDarwin=1"]
+}
diff --git a/clang/test/InstallAPI/exclusive-passes.test b/clang/test/InstallAPI/exclusive-passes.test
index 29b0fc3d7a2a..8e2d01ebaab1 100644
--- a/clang/test/InstallAPI/exclusive-passes.test
+++ b/clang/test/InstallAPI/exclusive-passes.test
@@ -10,6 +10,15 @@
; RUN: -o %t/output.tbd -v 2>&1 | FileCheck %s --check-prefix=INSTALLAPI
; RUN: llvm-readtapi --compare %t/output.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+// Try with -optionlist.
+; RUN: clang-installapi \
+; RUN: -target arm64-apple-macos12 -install_name @rpath/libfoo.dylib \
+; RUN: -current_version 1 -compatibility_version 1 \
+; RUN: -I%S/Inputs/LibFoo/usr/include -dynamiclib \
+; RUN: -extra-public-header %S/Inputs/LibFoo/usr/include/public.h \
+; RUN: -optionlist %t/options.json -o %t/output2.tbd 2>&1 | FileCheck %s -allow-empty
+; RUN: llvm-readtapi --compare %t/output2.tbd %t/expected.tbd 2>&1 | FileCheck -allow-empty %s
+
; CHECK-NOT: error
; CHECK-NOT: warning
@@ -17,6 +26,12 @@
; INSTALLAPI: Apple Public Headers:
; INSTALLAPI: Elf Public Headers:
+;--- options.json
+{
+ "Apple" : ["-DDarwin=1"],
+ "Elf" : ["-DNONDarwin=1"]
+}
+
;--- expected.tbd
{
"main_library": {
diff --git a/clang/test/InstallAPI/invalid-exclusive-passes.test b/clang/test/InstallAPI/invalid-exclusive-passes.test
index c23c918f0bfb..4b0b64efba08 100644
--- a/clang/test/InstallAPI/invalid-exclusive-passes.test
+++ b/clang/test/InstallAPI/invalid-exclusive-passes.test
@@ -30,6 +30,39 @@
; RUN: -o %t/output.tbd 2>&1 | FileCheck %s --check-prefix=INVALID_PROJECT_OPT
; INVALID_PROJECT_OPT: error: invalid argument '-Xproject' not allowed with '-fprofile-instr-generate'
+// Validate arguments not allowed with -X passed via json
+; RUN: not clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib -current_version 1 -compatibility_version 1 \
+; RUN: -optionlist %t/options.json -I/fake/path \
+; RUN: -I%t -dynamiclib -o %t/output.tbd 2>&1 | FileCheck %s --check-prefix=INVALID_JSON_OPT
+; INVALID_JSON_OPT: error: invalid argument '-XApple' not allowed with '-I/fake/path'
+
+// Validate invalid json path
+; RUN: not clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib -current_version 1 \
+; RUN: -compatibility_version 1 -optionlist %t/invalid_loc.json \
+; RUN: -I/fake/path -I%t -dynamiclib \
+; RUN: -o %t/output.tbd %t 2>&1 | FileCheck %s --check-prefix=INVALID_JSON_LOC -DMSG=%errc_ENOENT
+; INVALID_JSON_LOC: error: cannot open file {{.*}}invalid_loc.json': [[MSG]]
+
+// Validate invalid json format
+; RUN: not clang-installapi -target arm64-apple-macos12 \
+; RUN: -install_name @rpath/libfoo.dylib -current_version 1 \
+; RUN: -compatibility_version 1 -optionlist %t/invalid_format.json \
+; RUN: -I/fake/path -isysroot %sysroot -I%t -dynamiclib \
+; RUN: -o %t/output.tbd %t 2>&1 | FileCheck %s --check-prefix=INVALID_JSON_FORMAT
+; INVALID_JSON_FORMAT: error: could not read option input list {{.*}}invalid_format.json': invalid input format
+
+;--- options.json
+{
+ "Apple" : ["-I/fake/path"]
+}
+
+;--- invalid_format.json
+{
+ "Apple" : {"opt" : "-I/fake/path"}
+}
+
;--- inputs.json
{
"headers": [ ],
diff --git a/clang/test/OpenMP/threadprivate_codegen.cpp b/clang/test/OpenMP/threadprivate_codegen.cpp
index d0bd2b411ec8..b27783be829d 100644
--- a/clang/test/OpenMP/threadprivate_codegen.cpp
+++ b/clang/test/OpenMP/threadprivate_codegen.cpp
@@ -1039,40 +1039,40 @@ int foobar() {
// CHECK1-NEXT: [[ARRAYINIT_BEGIN1:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[ARRAYINIT_BEGIN1]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN1]], i32 noundef 1)
-// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[ARRAYINIT_BEGIN1]], i64 1
// CHECK1-NEXT: store ptr [[ARRAYINIT_ELEMENT]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
-// CHECK1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK1: invoke.cont3:
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT]], i64 1
// CHECK1-NEXT: store ptr [[ARRAYINIT_ELEMENT4]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT4]], i32 noundef 3)
-// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
+// CHECK1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
// CHECK1: invoke.cont5:
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT7:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 1
// CHECK1-NEXT: store ptr [[ARRAYINIT_ELEMENT7]], ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK1-NEXT: [[ARRAYINIT_BEGIN8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_ELEMENT7]], i64 0, i64 0
// CHECK1-NEXT: store ptr [[ARRAYINIT_BEGIN8]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN8]], i32 noundef 4)
-// CHECK1-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]]
+// CHECK1-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]]
// CHECK1: invoke.cont11:
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT12:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_BEGIN8]], i64 1
// CHECK1-NEXT: store ptr [[ARRAYINIT_ELEMENT12]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT12]], i32 noundef 5)
-// CHECK1-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]]
+// CHECK1-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]]
// CHECK1: invoke.cont13:
// CHECK1-NEXT: [[ARRAYINIT_ELEMENT14:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT12]], i64 1
// CHECK1-NEXT: store ptr [[ARRAYINIT_ELEMENT14]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT14]], i32 noundef 6)
-// CHECK1-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]]
+// CHECK1-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]]
// CHECK1: invoke.cont15:
// CHECK1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK1-NEXT: ret ptr [[TMP2]]
// CHECK1: lpad:
// CHECK1-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: cleanup
// CHECK1-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0
// CHECK1-NEXT: store ptr [[TMP4]], ptr [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1
@@ -1090,7 +1090,7 @@ int foobar() {
// CHECK1-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK1: lpad10:
// CHECK1-NEXT: [[TMP7:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: cleanup
// CHECK1-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 0
// CHECK1-NEXT: store ptr [[TMP8]], ptr [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP9:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 1
@@ -1254,34 +1254,34 @@ int foobar() {
// CHECK1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// CHECK1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// CHECK1: invoke.cont2:
// CHECK1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK1: invoke.cont3:
// CHECK1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// CHECK1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// CHECK1: invoke.cont7:
// CHECK1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// CHECK1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// CHECK1: invoke.cont8:
// CHECK1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// CHECK1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// CHECK1: invoke.cont9:
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]]
// CHECK1-NEXT: ret void
// CHECK1: lpad:
// CHECK1-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: cleanup
// CHECK1-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// CHECK1-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -1299,7 +1299,7 @@ int foobar() {
// CHECK1-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK1: lpad6:
// CHECK1-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: cleanup
// CHECK1-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// CHECK1-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -1375,7 +1375,7 @@ int foobar() {
// CHECK1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0
// CHECK1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// CHECK1-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP5]])
-// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK1: invoke.cont:
// CHECK1-NEXT: [[TMP6:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]]
// CHECK1-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]]
@@ -1436,7 +1436,7 @@ int foobar() {
// CHECK1-NEXT: ret i32 [[TMP32]]
// CHECK1: lpad:
// CHECK1-NEXT: [[TMP33:%.*]] = landingpad { ptr, i32 }
-// CHECK1-NEXT: cleanup
+// CHECK1-NEXT: cleanup
// CHECK1-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0
// CHECK1-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8
// CHECK1-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1
@@ -1525,7 +1525,7 @@ int foobar() {
//
//
// CHECK1-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK1-SAME: () #[[ATTR5:[0-9]+]] {
+// CHECK1-SAME: () #[[ATTR2]] {
// CHECK1-NEXT: entry:
// CHECK1-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
@@ -1777,34 +1777,34 @@ int foobar() {
// CHECK2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// CHECK2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// CHECK2: invoke.cont2:
// CHECK2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK2: invoke.cont3:
// CHECK2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// CHECK2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// CHECK2: invoke.cont7:
// CHECK2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// CHECK2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// CHECK2: invoke.cont8:
// CHECK2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// CHECK2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// CHECK2: invoke.cont9:
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]]
// CHECK2-NEXT: ret void
// CHECK2: lpad:
// CHECK2-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK2-NEXT: cleanup
+// CHECK2-NEXT: cleanup
// CHECK2-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// CHECK2-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -1822,7 +1822,7 @@ int foobar() {
// CHECK2-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK2: lpad6:
// CHECK2-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK2-NEXT: cleanup
+// CHECK2-NEXT: cleanup
// CHECK2-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// CHECK2-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -1891,40 +1891,40 @@ int foobar() {
// CHECK2-NEXT: [[ARRAYINIT_BEGIN1:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0
// CHECK2-NEXT: store ptr [[ARRAYINIT_BEGIN1]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN1]], i32 noundef 1)
-// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[ARRAYINIT_BEGIN1]], i64 1
// CHECK2-NEXT: store ptr [[ARRAYINIT_ELEMENT]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
-// CHECK2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK2: invoke.cont3:
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT]], i64 1
// CHECK2-NEXT: store ptr [[ARRAYINIT_ELEMENT4]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT4]], i32 noundef 3)
-// CHECK2-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
+// CHECK2-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]]
// CHECK2: invoke.cont5:
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT7:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 1
// CHECK2-NEXT: store ptr [[ARRAYINIT_ELEMENT7]], ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK2-NEXT: [[ARRAYINIT_BEGIN8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_ELEMENT7]], i64 0, i64 0
// CHECK2-NEXT: store ptr [[ARRAYINIT_BEGIN8]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN8]], i32 noundef 4)
-// CHECK2-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]]
+// CHECK2-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]]
// CHECK2: invoke.cont11:
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT12:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_BEGIN8]], i64 1
// CHECK2-NEXT: store ptr [[ARRAYINIT_ELEMENT12]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT12]], i32 noundef 5)
-// CHECK2-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]]
+// CHECK2-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]]
// CHECK2: invoke.cont13:
// CHECK2-NEXT: [[ARRAYINIT_ELEMENT14:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT12]], i64 1
// CHECK2-NEXT: store ptr [[ARRAYINIT_ELEMENT14]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8
// CHECK2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT14]], i32 noundef 6)
-// CHECK2-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]]
+// CHECK2-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]]
// CHECK2: invoke.cont15:
// CHECK2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8
// CHECK2-NEXT: ret ptr [[TMP2]]
// CHECK2: lpad:
// CHECK2-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 }
-// CHECK2-NEXT: cleanup
+// CHECK2-NEXT: cleanup
// CHECK2-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0
// CHECK2-NEXT: store ptr [[TMP4]], ptr [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1
@@ -1942,7 +1942,7 @@ int foobar() {
// CHECK2-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK2: lpad10:
// CHECK2-NEXT: [[TMP7:%.*]] = landingpad { ptr, i32 }
-// CHECK2-NEXT: cleanup
+// CHECK2-NEXT: cleanup
// CHECK2-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 0
// CHECK2-NEXT: store ptr [[TMP8]], ptr [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP9:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 1
@@ -2029,7 +2029,7 @@ int foobar() {
// CHECK2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0
// CHECK2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4
// CHECK2-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP5]])
-// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK2: invoke.cont:
// CHECK2-NEXT: [[TMP6:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]]
// CHECK2-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]]
@@ -2090,7 +2090,7 @@ int foobar() {
// CHECK2-NEXT: ret i32 [[TMP32]]
// CHECK2: lpad:
// CHECK2-NEXT: [[TMP33:%.*]] = landingpad { ptr, i32 }
-// CHECK2-NEXT: cleanup
+// CHECK2-NEXT: cleanup
// CHECK2-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0
// CHECK2-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8
// CHECK2-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1
@@ -2154,7 +2154,7 @@ int foobar() {
//
//
// CHECK2-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK2-SAME: () #[[ATTR5:[0-9]+]] {
+// CHECK2-SAME: () #[[ATTR2]] {
// CHECK2-NEXT: entry:
// CHECK2-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]])
@@ -2452,34 +2452,34 @@ int foobar() {
// SIMD1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// SIMD1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// SIMD1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// SIMD1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// SIMD1: invoke.cont:
// SIMD1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// SIMD1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// SIMD1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// SIMD1: invoke.cont2:
// SIMD1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// SIMD1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// SIMD1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// SIMD1: invoke.cont3:
// SIMD1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// SIMD1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// SIMD1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// SIMD1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// SIMD1: invoke.cont7:
// SIMD1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// SIMD1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// SIMD1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// SIMD1: invoke.cont8:
// SIMD1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// SIMD1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// SIMD1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// SIMD1: invoke.cont9:
// SIMD1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]]
// SIMD1-NEXT: ret void
// SIMD1: lpad:
// SIMD1-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// SIMD1-NEXT: cleanup
+// SIMD1-NEXT: cleanup
// SIMD1-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// SIMD1-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// SIMD1-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -2497,7 +2497,7 @@ int foobar() {
// SIMD1-NEXT: br label [[EHCLEANUP:%.*]]
// SIMD1: lpad6:
// SIMD1-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// SIMD1-NEXT: cleanup
+// SIMD1-NEXT: cleanup
// SIMD1-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// SIMD1-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// SIMD1-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -2568,7 +2568,7 @@ int foobar() {
// SIMD1: init:
// SIMD1-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4
// SIMD1-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]])
-// SIMD1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// SIMD1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// SIMD1: invoke.cont:
// SIMD1-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]]
// SIMD1-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]]
@@ -2613,7 +2613,7 @@ int foobar() {
// SIMD1-NEXT: ret i32 [[TMP21]]
// SIMD1: lpad:
// SIMD1-NEXT: [[TMP22:%.*]] = landingpad { ptr, i32 }
-// SIMD1-NEXT: cleanup
+// SIMD1-NEXT: cleanup
// SIMD1-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0
// SIMD1-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8
// SIMD1-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1
@@ -2652,7 +2652,7 @@ int foobar() {
//
//
// SIMD1-LABEL: define {{[^@]+}}@_Z6foobarv
-// SIMD1-SAME: () #[[ATTR5:[0-9]+]] {
+// SIMD1-SAME: () #[[ATTR2]] {
// SIMD1-NEXT: entry:
// SIMD1-NEXT: [[RES:%.*]] = alloca i32, align 4
// SIMD1-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4
@@ -2840,179 +2840,179 @@ int foobar() {
// SIMD2-LABEL: define {{[^@]+}}@__cxx_global_var_init
// SIMD2-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG115:![0-9]+]] {
// SIMD2-NEXT: entry:
-// SIMD2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG119:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG121:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG122:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG118:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG120:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG121:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S1C1Ei
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG123:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG122:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG126:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META123:![0-9]+]], metadata !DIExpression()), !dbg [[DBG125:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META126:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG129:![0-9]+]]
-// SIMD2-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG129]]
-// SIMD2-NEXT: ret void, !dbg [[DBG130:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG128:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG128]]
+// SIMD2-NEXT: ret void, !dbg [[DBG129:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S1D1Ev
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG131:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG130:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META131:![0-9]+]], metadata !DIExpression()), !dbg [[DBG132:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG134:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG135:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG133:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG134:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// SIMD2-SAME: () #[[ATTR0]] !dbg [[DBG136:![0-9]+]] {
+// SIMD2-SAME: () #[[ATTR0]] !dbg [[DBG135:![0-9]+]] {
// SIMD2-NEXT: entry:
-// SIMD2-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG137:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG139:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG140:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG136:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG138:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG139:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG141:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG140:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META142:![0-9]+]], metadata !DIExpression()), !dbg [[DBG144:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META141:![0-9]+]], metadata !DIExpression()), !dbg [[DBG143:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META145:![0-9]+]], metadata !DIExpression()), !dbg [[DBG146:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG145:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG147:![0-9]+]]
-// SIMD2-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG147]]
-// SIMD2-NEXT: ret void, !dbg [[DBG148:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG146:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG146]]
+// SIMD2-NEXT: ret void, !dbg [[DBG147:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG149:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG148:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG151:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META149:![0-9]+]], metadata !DIExpression()), !dbg [[DBG150:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG152:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG153:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG151:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG152:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// SIMD2-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG154:![0-9]+]] {
+// SIMD2-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG153:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// SIMD2-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// SIMD2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155:![0-9]+]]
-// SIMD2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157:![0-9]+]]
+// SIMD2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154:![0-9]+]]
+// SIMD2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156:![0-9]+]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// SIMD2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG158:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG157:![0-9]+]]
// SIMD2: invoke.cont:
-// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// SIMD2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG159:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG158:![0-9]+]]
// SIMD2: invoke.cont2:
-// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// SIMD2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG160:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG159:![0-9]+]]
// SIMD2: invoke.cont3:
-// SIMD2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155]]
-// SIMD2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161:![0-9]+]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160:![0-9]+]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// SIMD2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG162:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG161:![0-9]+]]
// SIMD2: invoke.cont7:
-// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// SIMD2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG163:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG162:![0-9]+]]
// SIMD2: invoke.cont8:
-// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
+// SIMD2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
// SIMD2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// SIMD2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG164:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG163:![0-9]+]]
// SIMD2: invoke.cont9:
-// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG165:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG164:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG164]]
// SIMD2: lpad:
// SIMD2-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// SIMD2-NEXT: cleanup, !dbg [[DBG166:![0-9]+]]
-// SIMD2-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG166]]
-// SIMD2-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG166]]
-// SIMD2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG166]]
-// SIMD2-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG166]]
-// SIMD2-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG157]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG157]]
+// SIMD2-NEXT: cleanup, !dbg [[DBG165:![0-9]+]]
+// SIMD2-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG165]]
+// SIMD2-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG165]]
+// SIMD2-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG156]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG156]]
// SIMD2: arraydestroy.body:
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG157]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG157]]
-// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG157]]
-// SIMD2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG157]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG157]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG156]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG156]]
+// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG156]]
+// SIMD2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG156]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG156]]
// SIMD2: arraydestroy.done4:
-// SIMD2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG157]]
+// SIMD2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG156]]
// SIMD2: lpad6:
// SIMD2-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// SIMD2-NEXT: cleanup, !dbg [[DBG166]]
-// SIMD2-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG166]]
-// SIMD2-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG166]]
-// SIMD2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG166]]
-// SIMD2-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG166]]
-// SIMD2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG161]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG161]]
+// SIMD2-NEXT: cleanup, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG165]]
+// SIMD2-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG165]]
+// SIMD2-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG165]]
+// SIMD2-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG160]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG160]]
// SIMD2: arraydestroy.body11:
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG161]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG161]]
-// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG161]]
-// SIMD2-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG161]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG161]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG160]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG160]]
+// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG160]]
+// SIMD2-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG160]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG160]]
// SIMD2: arraydestroy.done15:
-// SIMD2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG161]]
+// SIMD2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG160]]
// SIMD2: ehcleanup:
-// SIMD2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155]]
-// SIMD2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG155]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG155]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG155]]
+// SIMD2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154]]
+// SIMD2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG154]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG154]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG154]]
// SIMD2: arraydestroy.body17:
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG155]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG155]]
-// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG155]]
-// SIMD2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG155]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG155]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG154]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG154]]
+// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG154]]
+// SIMD2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG154]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG154]]
// SIMD2: arraydestroy.done21:
-// SIMD2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG155]]
+// SIMD2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG154]]
// SIMD2: eh.resume:
-// SIMD2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG155]]
-// SIMD2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG155]]
-// SIMD2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG155]]
-// SIMD2-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG155]]
-// SIMD2-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG155]]
+// SIMD2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG154]]
+// SIMD2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG154]]
+// SIMD2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG154]]
+// SIMD2-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG154]]
+// SIMD2-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG154]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// SIMD2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG167:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG166:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG172:![0-9]+]]
-// SIMD2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG172]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META170:![0-9]+]], metadata !DIExpression()), !dbg [[DBG171:![0-9]+]]
+// SIMD2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG171]]
// SIMD2: arraydestroy.body:
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG172]]
-// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG172]]
-// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG172]]
-// SIMD2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG172]]
-// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG172]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG171]]
+// SIMD2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG171]]
+// SIMD2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG171]]
+// SIMD2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG171]]
+// SIMD2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG171]]
// SIMD2: arraydestroy.done1:
-// SIMD2-NEXT: ret void, !dbg [[DBG172]]
+// SIMD2-NEXT: ret void, !dbg [[DBG171]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@main
@@ -3023,302 +3023,302 @@ int foobar() {
// SIMD2-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META173:![0-9]+]], metadata !DIExpression()), !dbg [[DBG174:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG175:![0-9]+]]
-// SIMD2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG175]]
-// SIMD2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG175]], !prof [[PROF176:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META172:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG174:![0-9]+]]
+// SIMD2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG174]]
+// SIMD2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG174]], !prof [[PROF175:![0-9]+]]
// SIMD2: init.check:
-// SIMD2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG175]]
-// SIMD2-NEXT: br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]], !dbg [[DBG175]]
+// SIMD2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD2-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG174]]
+// SIMD2-NEXT: br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]], !dbg [[DBG174]]
// SIMD2: init:
-// SIMD2-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG177:![0-9]+]]
+// SIMD2-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG176:![0-9]+]]
// SIMD2-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]])
-// SIMD2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG178:![0-9]+]]
+// SIMD2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG177:![0-9]+]]
// SIMD2: invoke.cont:
-// SIMD2-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD2-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD2-NEXT: br label [[INIT_END]], !dbg [[DBG175]]
+// SIMD2-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD2-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD2-NEXT: br label [[INIT_END]], !dbg [[DBG174]]
// SIMD2: init.end:
-// SIMD2-NEXT: [[TMP4:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG179:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP4]], ptr [[RES]], align 4, !dbg [[DBG180:![0-9]+]]
-// SIMD2-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZZ4mainE2sm, align 8, !dbg [[DBG181:![0-9]+]]
-// SIMD2-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG182:![0-9]+]]
-// SIMD2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG182]]
-// SIMD2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG182]]
-// SIMD2-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG183:![0-9]+]]
-// SIMD2-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG184:![0-9]+]]
-// SIMD2-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG184]]
-// SIMD2-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG184]]
-// SIMD2-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG185:![0-9]+]]
-// SIMD2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG186:![0-9]+]]
-// SIMD2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG186]]
-// SIMD2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG186]]
-// SIMD2-NEXT: [[TMP11:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG187:![0-9]+]]
-// SIMD2-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG188:![0-9]+]]
-// SIMD2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG188]]
-// SIMD2-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG188]]
-// SIMD2-NEXT: [[TMP13:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG189:![0-9]+]]
-// SIMD2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG190:![0-9]+]]
-// SIMD2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG190]]
-// SIMD2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG190]]
-// SIMD2-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG191:![0-9]+]]
-// SIMD2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG192:![0-9]+]]
-// SIMD2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG192]]
-// SIMD2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG192]]
-// SIMD2-NEXT: [[TMP17:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG193:![0-9]+]]
-// SIMD2-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG193]]
-// SIMD2-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG194:![0-9]+]]
-// SIMD2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG194]]
-// SIMD2-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG194]]
-// SIMD2-NEXT: [[TMP19:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG195:![0-9]+]]
-// SIMD2-NEXT: [[TMP20:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG196:![0-9]+]]
-// SIMD2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], [[TMP19]], !dbg [[DBG196]]
-// SIMD2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG196]]
-// SIMD2-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG197:![0-9]+]]
-// SIMD2-NEXT: ret i32 [[TMP21]], !dbg [[DBG198:![0-9]+]]
+// SIMD2-NEXT: [[TMP4:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG178:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP4]], ptr [[RES]], align 4, !dbg [[DBG179:![0-9]+]]
+// SIMD2-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZZ4mainE2sm, align 8, !dbg [[DBG180:![0-9]+]]
+// SIMD2-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG181:![0-9]+]]
+// SIMD2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG181]]
+// SIMD2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG181]]
+// SIMD2-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG182:![0-9]+]]
+// SIMD2-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG183:![0-9]+]]
+// SIMD2-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG183]]
+// SIMD2-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG183]]
+// SIMD2-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG184:![0-9]+]]
+// SIMD2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG185:![0-9]+]]
+// SIMD2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG185]]
+// SIMD2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG185]]
+// SIMD2-NEXT: [[TMP11:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG186:![0-9]+]]
+// SIMD2-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG187:![0-9]+]]
+// SIMD2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG187]]
+// SIMD2-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG187]]
+// SIMD2-NEXT: [[TMP13:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG188:![0-9]+]]
+// SIMD2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG189:![0-9]+]]
+// SIMD2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG189]]
+// SIMD2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG189]]
+// SIMD2-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG190:![0-9]+]]
+// SIMD2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG191:![0-9]+]]
+// SIMD2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG191]]
+// SIMD2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG191]]
+// SIMD2-NEXT: [[TMP17:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG192:![0-9]+]]
+// SIMD2-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG192]]
+// SIMD2-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG193:![0-9]+]]
+// SIMD2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG193]]
+// SIMD2-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG193]]
+// SIMD2-NEXT: [[TMP19:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG194:![0-9]+]]
+// SIMD2-NEXT: [[TMP20:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG195:![0-9]+]]
+// SIMD2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], [[TMP19]], !dbg [[DBG195]]
+// SIMD2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG195]]
+// SIMD2-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG196:![0-9]+]]
+// SIMD2-NEXT: ret i32 [[TMP21]], !dbg [[DBG197:![0-9]+]]
// SIMD2: lpad:
// SIMD2-NEXT: [[TMP22:%.*]] = landingpad { ptr, i32 }
-// SIMD2-NEXT: cleanup, !dbg [[DBG199:![0-9]+]]
-// SIMD2-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0, !dbg [[DBG199]]
-// SIMD2-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG199]]
-// SIMD2-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1, !dbg [[DBG199]]
-// SIMD2-NEXT: store i32 [[TMP24]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG199]]
-// SIMD2-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG175]]
+// SIMD2-NEXT: cleanup, !dbg [[DBG198:![0-9]+]]
+// SIMD2-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0, !dbg [[DBG198]]
+// SIMD2-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG198]]
+// SIMD2-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1, !dbg [[DBG198]]
+// SIMD2-NEXT: store i32 [[TMP24]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG198]]
+// SIMD2-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG174]]
// SIMD2: eh.resume:
-// SIMD2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG175]]
-// SIMD2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG175]]
-// SIMD2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG175]]
-// SIMD2-NEXT: [[LPAD_VAL8:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG175]]
-// SIMD2-NEXT: resume { ptr, i32 } [[LPAD_VAL8]], !dbg [[DBG175]]
+// SIMD2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG174]]
+// SIMD2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG174]]
+// SIMD2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG174]]
+// SIMD2-NEXT: [[LPAD_VAL8:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG174]]
+// SIMD2-NEXT: resume { ptr, i32 } [[LPAD_VAL8]], !dbg [[DBG174]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC1Ei
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG200:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG199:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG203:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META200:![0-9]+]], metadata !DIExpression()), !dbg [[DBG202:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META204:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META203:![0-9]+]], metadata !DIExpression()), !dbg [[DBG204:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG206:![0-9]+]]
-// SIMD2-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG206]]
-// SIMD2-NEXT: ret void, !dbg [[DBG207:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG205:![0-9]+]]
+// SIMD2-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG205]]
+// SIMD2-NEXT: ret void, !dbg [[DBG206:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD1Ev
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG208:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG207:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META209:![0-9]+]], metadata !DIExpression()), !dbg [[DBG210:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META208:![0-9]+]], metadata !DIExpression()), !dbg [[DBG209:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG211:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG212:![0-9]+]]
+// SIMD2-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG210:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG211:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_Z6foobarv
-// SIMD2-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG213:![0-9]+]] {
+// SIMD2-SAME: () #[[ATTR2]] !dbg [[DBG212:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[RES:%.*]] = alloca i32, align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META214:![0-9]+]], metadata !DIExpression()), !dbg [[DBG215:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG216:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP0]], ptr [[RES]], align 4, !dbg [[DBG217:![0-9]+]]
-// SIMD2-NEXT: [[TMP1:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG218:![0-9]+]]
-// SIMD2-NEXT: [[TMP2:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG219:![0-9]+]]
-// SIMD2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[TMP1]], !dbg [[DBG219]]
-// SIMD2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG219]]
-// SIMD2-NEXT: [[TMP3:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG220:![0-9]+]]
-// SIMD2-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG221:![0-9]+]]
-// SIMD2-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG221]]
-// SIMD2-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG221]]
-// SIMD2-NEXT: [[TMP5:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG222:![0-9]+]]
-// SIMD2-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG223:![0-9]+]]
-// SIMD2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG223]]
-// SIMD2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG223]]
-// SIMD2-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG224:![0-9]+]]
-// SIMD2-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG225:![0-9]+]]
-// SIMD2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG225]]
-// SIMD2-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG225]]
-// SIMD2-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG226:![0-9]+]]
-// SIMD2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG227:![0-9]+]]
-// SIMD2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG227]]
-// SIMD2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG227]]
-// SIMD2-NEXT: [[TMP11:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG228:![0-9]+]]
-// SIMD2-NEXT: [[CONV:%.*]] = fptosi float [[TMP11]] to i32, !dbg [[DBG228]]
-// SIMD2-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG229:![0-9]+]]
-// SIMD2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], [[CONV]], !dbg [[DBG229]]
-// SIMD2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG229]]
-// SIMD2-NEXT: [[TMP13:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG230:![0-9]+]]
-// SIMD2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
-// SIMD2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG231]]
-// SIMD2-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG231]]
-// SIMD2-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG232:![0-9]+]]
-// SIMD2-NEXT: ret i32 [[TMP15]], !dbg [[DBG233:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META213:![0-9]+]], metadata !DIExpression()), !dbg [[DBG214:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG215:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP0]], ptr [[RES]], align 4, !dbg [[DBG216:![0-9]+]]
+// SIMD2-NEXT: [[TMP1:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG217:![0-9]+]]
+// SIMD2-NEXT: [[TMP2:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG218:![0-9]+]]
+// SIMD2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[TMP1]], !dbg [[DBG218]]
+// SIMD2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG218]]
+// SIMD2-NEXT: [[TMP3:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG219:![0-9]+]]
+// SIMD2-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG220:![0-9]+]]
+// SIMD2-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG220]]
+// SIMD2-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG220]]
+// SIMD2-NEXT: [[TMP5:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG221:![0-9]+]]
+// SIMD2-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG222:![0-9]+]]
+// SIMD2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG222]]
+// SIMD2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG222]]
+// SIMD2-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG223:![0-9]+]]
+// SIMD2-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG224:![0-9]+]]
+// SIMD2-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG224]]
+// SIMD2-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG224]]
+// SIMD2-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG225:![0-9]+]]
+// SIMD2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG226:![0-9]+]]
+// SIMD2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG226]]
+// SIMD2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG226]]
+// SIMD2-NEXT: [[TMP11:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG227:![0-9]+]]
+// SIMD2-NEXT: [[CONV:%.*]] = fptosi float [[TMP11]] to i32, !dbg [[DBG227]]
+// SIMD2-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG228:![0-9]+]]
+// SIMD2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], [[CONV]], !dbg [[DBG228]]
+// SIMD2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG228]]
+// SIMD2-NEXT: [[TMP13:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG229:![0-9]+]]
+// SIMD2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG230:![0-9]+]]
+// SIMD2-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG230]]
+// SIMD2-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG230]]
+// SIMD2-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
+// SIMD2-NEXT: ret i32 [[TMP15]], !dbg [[DBG232:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
-// SIMD2-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG234:![0-9]+]] {
+// SIMD2-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG233:![0-9]+]] {
// SIMD2-NEXT: entry:
-// SIMD2-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG235:![0-9]+]]
-// SIMD2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG235]]
-// SIMD2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG235]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG234:![0-9]+]]
+// SIMD2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG234]]
+// SIMD2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG234]]
// SIMD2: init.check:
-// SIMD2-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG235]]
-// SIMD2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG236:![0-9]+]]
-// SIMD2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG235]]
-// SIMD2-NEXT: br label [[INIT_END]], !dbg [[DBG235]]
+// SIMD2-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG234]]
+// SIMD2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG235:![0-9]+]]
+// SIMD2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG234]]
+// SIMD2-NEXT: br label [[INIT_END]], !dbg [[DBG234]]
// SIMD2: init.end:
-// SIMD2-NEXT: ret void, !dbg [[DBG238:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG237:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG239:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG238:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META240:![0-9]+]], metadata !DIExpression()), !dbg [[DBG242:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META239:![0-9]+]], metadata !DIExpression()), !dbg [[DBG241:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META243:![0-9]+]], metadata !DIExpression()), !dbg [[DBG244:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META242:![0-9]+]], metadata !DIExpression()), !dbg [[DBG243:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG245:![0-9]+]]
-// SIMD2-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG245]]
-// SIMD2-NEXT: ret void, !dbg [[DBG246:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG244:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG244]]
+// SIMD2-NEXT: ret void, !dbg [[DBG245:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG247:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG246:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META248:![0-9]+]], metadata !DIExpression()), !dbg [[DBG249:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META247:![0-9]+]], metadata !DIExpression()), !dbg [[DBG248:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG250:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG251:![0-9]+]]
+// SIMD2-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG249:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG250:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG252:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG251:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META253:![0-9]+]], metadata !DIExpression()), !dbg [[DBG254:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META252:![0-9]+]], metadata !DIExpression()), !dbg [[DBG253:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META255:![0-9]+]], metadata !DIExpression()), !dbg [[DBG256:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META254:![0-9]+]], metadata !DIExpression()), !dbg [[DBG255:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG257:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG258:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG257]]
-// SIMD2-NEXT: ret void, !dbg [[DBG259:![0-9]+]]
+// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG256:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG257:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG256]]
+// SIMD2-NEXT: ret void, !dbg [[DBG258:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG260:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG259:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META261:![0-9]+]], metadata !DIExpression()), !dbg [[DBG262:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META260:![0-9]+]], metadata !DIExpression()), !dbg [[DBG261:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG263:![0-9]+]]
-// SIMD2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG265:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG266:![0-9]+]]
+// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG262:![0-9]+]]
+// SIMD2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG264:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG265:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG267:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG266:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META268:![0-9]+]], metadata !DIExpression()), !dbg [[DBG269:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META267:![0-9]+]], metadata !DIExpression()), !dbg [[DBG268:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META270:![0-9]+]], metadata !DIExpression()), !dbg [[DBG271:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META269:![0-9]+]], metadata !DIExpression()), !dbg [[DBG270:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG272:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG273:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG272]]
-// SIMD2-NEXT: ret void, !dbg [[DBG274:![0-9]+]]
+// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG271:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG272:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG271]]
+// SIMD2-NEXT: ret void, !dbg [[DBG273:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG275:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG274:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META276:![0-9]+]], metadata !DIExpression()), !dbg [[DBG277:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META275:![0-9]+]], metadata !DIExpression()), !dbg [[DBG276:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG278:![0-9]+]]
-// SIMD2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG280:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG281:![0-9]+]]
+// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG277:![0-9]+]]
+// SIMD2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG279:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG280:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG282:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG281:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META283:![0-9]+]], metadata !DIExpression()), !dbg [[DBG284:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META282:![0-9]+]], metadata !DIExpression()), !dbg [[DBG283:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META285:![0-9]+]], metadata !DIExpression()), !dbg [[DBG286:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META284:![0-9]+]], metadata !DIExpression()), !dbg [[DBG285:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG287:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG288:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG287]]
-// SIMD2-NEXT: ret void, !dbg [[DBG289:![0-9]+]]
+// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG286:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG287:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG286]]
+// SIMD2-NEXT: ret void, !dbg [[DBG288:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG290:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG289:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META291:![0-9]+]], metadata !DIExpression()), !dbg [[DBG292:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META290:![0-9]+]], metadata !DIExpression()), !dbg [[DBG291:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG293:![0-9]+]]
-// SIMD2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG295:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG296:![0-9]+]]
+// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG292:![0-9]+]]
+// SIMD2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG294:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG295:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG297:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG296:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META298:![0-9]+]], metadata !DIExpression()), !dbg [[DBG299:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META297:![0-9]+]], metadata !DIExpression()), !dbg [[DBG298:![0-9]+]]
// SIMD2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META300:![0-9]+]], metadata !DIExpression()), !dbg [[DBG301:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG300:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG302:![0-9]+]]
-// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG303:![0-9]+]]
-// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG302]]
-// SIMD2-NEXT: ret void, !dbg [[DBG304:![0-9]+]]
+// SIMD2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG301:![0-9]+]]
+// SIMD2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG302:![0-9]+]]
+// SIMD2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG301]]
+// SIMD2-NEXT: ret void, !dbg [[DBG303:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG305:![0-9]+]] {
+// SIMD2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG304:![0-9]+]] {
// SIMD2-NEXT: entry:
// SIMD2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META306:![0-9]+]], metadata !DIExpression()), !dbg [[DBG307:![0-9]+]]
+// SIMD2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META305:![0-9]+]], metadata !DIExpression()), !dbg [[DBG306:![0-9]+]]
// SIMD2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG308:![0-9]+]]
-// SIMD2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG310:![0-9]+]]
-// SIMD2-NEXT: ret void, !dbg [[DBG311:![0-9]+]]
+// SIMD2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG307:![0-9]+]]
+// SIMD2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG309:![0-9]+]]
+// SIMD2-NEXT: ret void, !dbg [[DBG310:![0-9]+]]
//
//
// SIMD2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// SIMD2-SAME: () #[[ATTR0]] !dbg [[DBG312:![0-9]+]] {
+// SIMD2-SAME: () #[[ATTR0]] !dbg [[DBG311:![0-9]+]] {
// SIMD2-NEXT: entry:
-// SIMD2-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG314:![0-9]+]]
-// SIMD2-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG314]]
-// SIMD2-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG314]]
+// SIMD2-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG313:![0-9]+]]
+// SIMD2-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG313]]
+// SIMD2-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG313]]
// SIMD2-NEXT: ret void
//
//
@@ -3445,34 +3445,34 @@ int foobar() {
// CHECK-TLS1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK-TLS1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK-TLS1: invoke.cont:
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// CHECK-TLS1: invoke.cont2:
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK-TLS1: invoke.cont3:
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// CHECK-TLS1: invoke.cont7:
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// CHECK-TLS1: invoke.cont8:
// CHECK-TLS1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK-TLS1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// CHECK-TLS1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// CHECK-TLS1: invoke.cont9:
// CHECK-TLS1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]]
// CHECK-TLS1-NEXT: ret void
// CHECK-TLS1: lpad:
// CHECK-TLS1-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS1-NEXT: cleanup
+// CHECK-TLS1-NEXT: cleanup
// CHECK-TLS1-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// CHECK-TLS1-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// CHECK-TLS1-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -3490,7 +3490,7 @@ int foobar() {
// CHECK-TLS1-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK-TLS1: lpad6:
// CHECK-TLS1-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS1-NEXT: cleanup
+// CHECK-TLS1-NEXT: cleanup
// CHECK-TLS1-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// CHECK-TLS1-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// CHECK-TLS1-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -3708,7 +3708,7 @@ int foobar() {
//
//
// CHECK-TLS1-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK-TLS1-SAME: () #[[ATTR7:[0-9]+]] {
+// CHECK-TLS1-SAME: () #[[ATTR1]] {
// CHECK-TLS1-NEXT: entry:
// CHECK-TLS1-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK-TLS1-NEXT: [[TMP0:%.*]] = call ptr @_ZTWN6Static1sE()
@@ -3997,7 +3997,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK-TLS2-SAME: () #[[ATTR6:[0-9]+]] {
+// CHECK-TLS2-SAME: () #[[ATTR2]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = call ptr @_ZTWN6Static1sE()
@@ -4050,7 +4050,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__cxx_global_var_init
-// CHECK-TLS2-SAME: () #[[ATTR7:[0-9]+]] {
+// CHECK-TLS2-SAME: () #[[ATTR6:[0-9]+]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5)
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR4]]
@@ -4106,7 +4106,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// CHECK-TLS2-SAME: () #[[ATTR7]] {
+// CHECK-TLS2-SAME: () #[[ATTR6]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27)
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR4]]
@@ -4162,7 +4162,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// CHECK-TLS2-SAME: () #[[ATTR7]] personality ptr @__gxx_personality_v0 {
+// CHECK-TLS2-SAME: () #[[ATTR6]] personality ptr @__gxx_personality_v0 {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// CHECK-TLS2-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
@@ -4172,34 +4172,34 @@ int foobar() {
// CHECK-TLS2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK-TLS2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// CHECK-TLS2: invoke.cont:
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// CHECK-TLS2: invoke.cont2:
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// CHECK-TLS2: invoke.cont3:
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// CHECK-TLS2: invoke.cont7:
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// CHECK-TLS2: invoke.cont8:
// CHECK-TLS2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// CHECK-TLS2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK-TLS2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// CHECK-TLS2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// CHECK-TLS2: invoke.cont9:
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR4]]
// CHECK-TLS2-NEXT: ret void
// CHECK-TLS2: lpad:
// CHECK-TLS2-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS2-NEXT: cleanup
+// CHECK-TLS2-NEXT: cleanup
// CHECK-TLS2-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// CHECK-TLS2-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// CHECK-TLS2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -4217,7 +4217,7 @@ int foobar() {
// CHECK-TLS2-NEXT: br label [[EHCLEANUP:%.*]]
// CHECK-TLS2: lpad6:
// CHECK-TLS2-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS2-NEXT: cleanup
+// CHECK-TLS2-NEXT: cleanup
// CHECK-TLS2-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// CHECK-TLS2-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// CHECK-TLS2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -4255,7 +4255,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// CHECK-TLS2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR7]] {
+// CHECK-TLS2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR6]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
@@ -4296,7 +4296,7 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
-// CHECK-TLS2-SAME: () #[[ATTR7]] {
+// CHECK-TLS2-SAME: () #[[ATTR6]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8
// CHECK-TLS2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
@@ -4359,14 +4359,14 @@ int foobar() {
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// CHECK-TLS2-SAME: () #[[ATTR7]] {
+// CHECK-TLS2-SAME: () #[[ATTR6]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: call void @__cxx_global_var_init.1()
// CHECK-TLS2-NEXT: ret void
//
//
// CHECK-TLS2-LABEL: define {{[^@]+}}@__tls_init
-// CHECK-TLS2-SAME: () #[[ATTR7]] {
+// CHECK-TLS2-SAME: () #[[ATTR6]] {
// CHECK-TLS2-NEXT: entry:
// CHECK-TLS2-NEXT: [[TMP0:%.*]] = load i8, ptr @__tls_guard, align 1
// CHECK-TLS2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0
@@ -4383,235 +4383,235 @@ int foobar() {
// CHECK-TLS3-LABEL: define {{[^@]+}}@__cxx_global_var_init
// CHECK-TLS3-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG116:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
-// CHECK-TLS3-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG120:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG122:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG123:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG119:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG121:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG122:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S1C1Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG124:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG123:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META125:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG126:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META128:![0-9]+]], metadata !DIExpression()), !dbg [[DBG129:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG130:![0-9]+]]
-// CHECK-TLS3-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG130]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG131:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG129:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG129]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG130:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S1D1Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG132:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG131:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META133:![0-9]+]], metadata !DIExpression()), !dbg [[DBG134:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG135:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG136:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG134:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG135:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG137:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG136:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META138:![0-9]+]], metadata !DIExpression()), !dbg [[DBG139:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META137:![0-9]+]], metadata !DIExpression()), !dbg [[DBG138:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META140:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META139:![0-9]+]], metadata !DIExpression()), !dbg [[DBG140:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG142:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG143:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG142]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG144:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG141:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG142:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG141]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG143:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG145:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG144:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META146:![0-9]+]], metadata !DIExpression()), !dbg [[DBG147:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META145:![0-9]+]], metadata !DIExpression()), !dbg [[DBG146:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG148:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG150:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG151:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG147:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG149:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG150:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG152:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG151:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
-// CHECK-TLS3-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG153:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG155:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG156:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG152:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG154:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG155:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG157:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG156:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META158:![0-9]+]], metadata !DIExpression()), !dbg [[DBG160:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META157:![0-9]+]], metadata !DIExpression()), !dbg [[DBG159:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META161:![0-9]+]], metadata !DIExpression()), !dbg [[DBG162:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META160:![0-9]+]], metadata !DIExpression()), !dbg [[DBG161:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG163:![0-9]+]]
-// CHECK-TLS3-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG163]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG164:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG162:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG162]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG163:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG165:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG164:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META166:![0-9]+]], metadata !DIExpression()), !dbg [[DBG167:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META165:![0-9]+]], metadata !DIExpression()), !dbg [[DBG166:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG168:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG169:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG167:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG168:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG170:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG169:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG172:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META170:![0-9]+]], metadata !DIExpression()), !dbg [[DBG171:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META173:![0-9]+]], metadata !DIExpression()), !dbg [[DBG174:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META172:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG175:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG176:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG175]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG177:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG174:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG175:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG174]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG176:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG178:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG177:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META179:![0-9]+]], metadata !DIExpression()), !dbg [[DBG180:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META178:![0-9]+]], metadata !DIExpression()), !dbg [[DBG179:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG181:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG183:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG184:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG180:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG182:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG183:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// CHECK-TLS3-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG185:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG184:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// CHECK-TLS3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG186:![0-9]+]]
-// CHECK-TLS3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG188:![0-9]+]]
+// CHECK-TLS3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG185:![0-9]+]]
+// CHECK-TLS3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG187:![0-9]+]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG189:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG188:![0-9]+]]
// CHECK-TLS3: invoke.cont:
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG188]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG187]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG190:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG189:![0-9]+]]
// CHECK-TLS3: invoke.cont2:
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG188]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG187]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG191:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG190:![0-9]+]]
// CHECK-TLS3: invoke.cont3:
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG192:![0-9]+]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG191:![0-9]+]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG193:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG192:![0-9]+]]
// CHECK-TLS3: invoke.cont7:
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG192]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG191]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG194:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG193:![0-9]+]]
// CHECK-TLS3: invoke.cont8:
-// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG192]]
+// CHECK-TLS3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG191]]
// CHECK-TLS3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK-TLS3-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG195:![0-9]+]]
+// CHECK-TLS3-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG194:![0-9]+]]
// CHECK-TLS3: invoke.cont9:
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG196:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG195:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG195]]
// CHECK-TLS3: lpad:
// CHECK-TLS3-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS3-NEXT: cleanup, !dbg [[DBG197:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG188]]
+// CHECK-TLS3-NEXT: cleanup, !dbg [[DBG196:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG187]]
// CHECK-TLS3: arraydestroy.body:
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG188]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG188]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG187]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG187]]
// CHECK-TLS3: arraydestroy.done4:
-// CHECK-TLS3-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG188]]
+// CHECK-TLS3-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG187]]
// CHECK-TLS3: lpad6:
// CHECK-TLS3-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS3-NEXT: cleanup, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG197]]
-// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG192]]
+// CHECK-TLS3-NEXT: cleanup, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG196]]
+// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG191]]
// CHECK-TLS3: arraydestroy.body11:
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG192]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG192]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG191]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG191]]
// CHECK-TLS3: arraydestroy.done15:
-// CHECK-TLS3-NEXT: br label [[EHCLEANUP]], !dbg [[DBG192]]
+// CHECK-TLS3-NEXT: br label [[EHCLEANUP]], !dbg [[DBG191]]
// CHECK-TLS3: ehcleanup:
-// CHECK-TLS3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG186]]
+// CHECK-TLS3-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG185]]
// CHECK-TLS3: arraydestroy.body17:
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG186]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG185]]
// CHECK-TLS3: arraydestroy.done21:
-// CHECK-TLS3-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG186]]
+// CHECK-TLS3-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG185]]
// CHECK-TLS3: eh.resume:
-// CHECK-TLS3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG186]]
-// CHECK-TLS3-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG186]]
+// CHECK-TLS3-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG185]]
+// CHECK-TLS3-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG185]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// CHECK-TLS3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG198:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG197:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META202:![0-9]+]], metadata !DIExpression()), !dbg [[DBG203:![0-9]+]]
-// CHECK-TLS3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG203]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG202:![0-9]+]]
+// CHECK-TLS3-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG202]]
// CHECK-TLS3: arraydestroy.body:
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG203]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG203]]
-// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG203]]
-// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG203]]
-// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG203]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG202]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG202]]
+// CHECK-TLS3-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG202]]
+// CHECK-TLS3-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG202]]
+// CHECK-TLS3-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG202]]
// CHECK-TLS3: arraydestroy.done1:
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG203]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG202]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@main
@@ -4620,72 +4620,72 @@ int foobar() {
// CHECK-TLS3-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META204:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVZ4mainE2sm, align 1, !dbg [[DBG206:![0-9]+]]
-// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG206]]
-// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG206]], !prof [[PROF207:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META203:![0-9]+]], metadata !DIExpression()), !dbg [[DBG204:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVZ4mainE2sm, align 1, !dbg [[DBG205:![0-9]+]]
+// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG205]]
+// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG205]], !prof [[PROF206:![0-9]+]]
// CHECK-TLS3: init.check:
-// CHECK-TLS3-NEXT: [[TMP1:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG208:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG209:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG209]]
-// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]]), !dbg [[DBG210:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP3:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG206]]
-// CHECK-TLS3-NEXT: store i8 1, ptr @_ZGVZ4mainE2sm, align 1, !dbg [[DBG206]]
-// CHECK-TLS3-NEXT: br label [[INIT_END]], !dbg [[DBG206]]
+// CHECK-TLS3-NEXT: [[TMP1:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG207:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG208:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG208]]
+// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]]), !dbg [[DBG209:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP3:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG205]]
+// CHECK-TLS3-NEXT: store i8 1, ptr @_ZGVZ4mainE2sm, align 1, !dbg [[DBG205]]
+// CHECK-TLS3-NEXT: br label [[INIT_END]], !dbg [[DBG205]]
// CHECK-TLS3: init.end:
-// CHECK-TLS3-NEXT: [[TMP4:%.*]] = call ptr @_ZTWN6Static1sE(), !dbg [[DBG211:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG212:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG212]]
-// CHECK-TLS3-NEXT: store i32 [[TMP5]], ptr [[RES]], align 4, !dbg [[DBG213:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP6:%.*]] = call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @_ZZ4mainE2sm), !dbg [[DBG214:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP6]], i32 0, i32 0, !dbg [[DBG215:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG215]]
-// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG216:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG216]]
-// CHECK-TLS3-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG216]]
-// CHECK-TLS3-NEXT: [[TMP9:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG217:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG218:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP10:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG218]]
-// CHECK-TLS3-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG219:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG219]]
-// CHECK-TLS3-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG219]]
-// CHECK-TLS3-NEXT: [[TMP12:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG220:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG221:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG221]]
-// CHECK-TLS3-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG221]]
-// CHECK-TLS3-NEXT: [[TMP14:%.*]] = call ptr @_ZTW3gs3(), !dbg [[DBG222:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP14]], i32 0, i32 0, !dbg [[DBG223:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP15:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG223]]
-// CHECK-TLS3-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG224:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG224]]
-// CHECK-TLS3-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG224]]
-// CHECK-TLS3-NEXT: [[TMP17:%.*]] = call ptr @_ZTW5arr_x(), !dbg [[DBG225:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP17]], i64 0, i64 1, !dbg [[DBG225]]
-// CHECK-TLS3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG225]]
-// CHECK-TLS3-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG226:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP18:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG226]]
-// CHECK-TLS3-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG227:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG227]]
-// CHECK-TLS3-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG227]]
-// CHECK-TLS3-NEXT: [[TMP20:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIiE2stE), !dbg [[DBG228:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4, !dbg [[DBG228]]
-// CHECK-TLS3-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG229:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG229]]
-// CHECK-TLS3-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG229]]
-// CHECK-TLS3-NEXT: [[TMP23:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIfE2stE), !dbg [[DBG230:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP24:%.*]] = load float, ptr [[TMP23]], align 4, !dbg [[DBG230]]
-// CHECK-TLS3-NEXT: [[CONV:%.*]] = fptosi float [[TMP24]] to i32, !dbg [[DBG230]]
-// CHECK-TLS3-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP25]], [[CONV]], !dbg [[DBG231]]
-// CHECK-TLS3-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG231]]
-// CHECK-TLS3-NEXT: [[TMP26:%.*]] = call ptr @_ZTWN2STI2S4E2stE(), !dbg [[DBG232:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP26]], i32 0, i32 0, !dbg [[DBG233:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG233]]
-// CHECK-TLS3-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG234:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP28]], [[TMP27]], !dbg [[DBG234]]
-// CHECK-TLS3-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG234]]
-// CHECK-TLS3-NEXT: [[TMP29:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG235:![0-9]+]]
-// CHECK-TLS3-NEXT: ret i32 [[TMP29]], !dbg [[DBG236:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP4:%.*]] = call ptr @_ZTWN6Static1sE(), !dbg [[DBG210:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG211:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP5:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG211]]
+// CHECK-TLS3-NEXT: store i32 [[TMP5]], ptr [[RES]], align 4, !dbg [[DBG212:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP6:%.*]] = call align 8 ptr @llvm.threadlocal.address.p0(ptr align 8 @_ZZ4mainE2sm), !dbg [[DBG213:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP6]], i32 0, i32 0, !dbg [[DBG214:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP7:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG214]]
+// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG215:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG215]]
+// CHECK-TLS3-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG215]]
+// CHECK-TLS3-NEXT: [[TMP9:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG216:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG217:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP10:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG217]]
+// CHECK-TLS3-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG218:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG218]]
+// CHECK-TLS3-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG218]]
+// CHECK-TLS3-NEXT: [[TMP12:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG219:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG220:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG220]]
+// CHECK-TLS3-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG220]]
+// CHECK-TLS3-NEXT: [[TMP14:%.*]] = call ptr @_ZTW3gs3(), !dbg [[DBG221:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP14]], i32 0, i32 0, !dbg [[DBG222:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP15:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG222]]
+// CHECK-TLS3-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG223:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG223]]
+// CHECK-TLS3-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG223]]
+// CHECK-TLS3-NEXT: [[TMP17:%.*]] = call ptr @_ZTW5arr_x(), !dbg [[DBG224:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP17]], i64 0, i64 1, !dbg [[DBG224]]
+// CHECK-TLS3-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG224]]
+// CHECK-TLS3-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG225:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP18:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG225]]
+// CHECK-TLS3-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG226:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG226]]
+// CHECK-TLS3-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG226]]
+// CHECK-TLS3-NEXT: [[TMP20:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIiE2stE), !dbg [[DBG227:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP21:%.*]] = load i32, ptr [[TMP20]], align 4, !dbg [[DBG227]]
+// CHECK-TLS3-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG228:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG228]]
+// CHECK-TLS3-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG228]]
+// CHECK-TLS3-NEXT: [[TMP23:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIfE2stE), !dbg [[DBG229:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP24:%.*]] = load float, ptr [[TMP23]], align 4, !dbg [[DBG229]]
+// CHECK-TLS3-NEXT: [[CONV:%.*]] = fptosi float [[TMP24]] to i32, !dbg [[DBG229]]
+// CHECK-TLS3-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG230:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP25]], [[CONV]], !dbg [[DBG230]]
+// CHECK-TLS3-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG230]]
+// CHECK-TLS3-NEXT: [[TMP26:%.*]] = call ptr @_ZTWN2STI2S4E2stE(), !dbg [[DBG231:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP26]], i32 0, i32 0, !dbg [[DBG232:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP27:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG232]]
+// CHECK-TLS3-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG233:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP28]], [[TMP27]], !dbg [[DBG233]]
+// CHECK-TLS3-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG233]]
+// CHECK-TLS3-NEXT: [[TMP29:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG234:![0-9]+]]
+// CHECK-TLS3-NEXT: ret i32 [[TMP29]], !dbg [[DBG235:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZTWL3gs1
@@ -4696,29 +4696,29 @@ int foobar() {
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC1Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG237:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG236:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META238:![0-9]+]], metadata !DIExpression()), !dbg [[DBG240:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META237:![0-9]+]], metadata !DIExpression()), !dbg [[DBG239:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META241:![0-9]+]], metadata !DIExpression()), !dbg [[DBG242:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META240:![0-9]+]], metadata !DIExpression()), !dbg [[DBG241:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG243:![0-9]+]]
-// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG243]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG244:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG242:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG242]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG243:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD1Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG245:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG244:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META246:![0-9]+]], metadata !DIExpression()), !dbg [[DBG247:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META245:![0-9]+]], metadata !DIExpression()), !dbg [[DBG246:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG248:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG249:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG247:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG248:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZTWN6Static1sE
@@ -4758,174 +4758,174 @@ int foobar() {
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG250:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG249:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META251:![0-9]+]], metadata !DIExpression()), !dbg [[DBG252:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META250:![0-9]+]], metadata !DIExpression()), !dbg [[DBG251:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META253:![0-9]+]], metadata !DIExpression()), !dbg [[DBG254:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META252:![0-9]+]], metadata !DIExpression()), !dbg [[DBG253:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG255:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG256:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG255]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG257:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG254:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG255:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG254]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG256:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG258:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG257:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META259:![0-9]+]], metadata !DIExpression()), !dbg [[DBG260:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META258:![0-9]+]], metadata !DIExpression()), !dbg [[DBG259:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG261:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG263:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG264:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG260:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG262:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG263:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK-TLS3-SAME: () #[[ATTR7:[0-9]+]] !dbg [[DBG265:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR1]] !dbg [[DBG264:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[RES:%.*]] = alloca i32, align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META266:![0-9]+]], metadata !DIExpression()), !dbg [[DBG267:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call ptr @_ZTWN6Static1sE(), !dbg [[DBG268:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP0]], i32 0, i32 0, !dbg [[DBG269:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG269]]
-// CHECK-TLS3-NEXT: store i32 [[TMP1]], ptr [[RES]], align 4, !dbg [[DBG270:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP2:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG271:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP2]], i32 0, i32 0, !dbg [[DBG272:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG272]]
-// CHECK-TLS3-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG273:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG273]]
-// CHECK-TLS3-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG273]]
-// CHECK-TLS3-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG274:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG275:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG275]]
-// CHECK-TLS3-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG275]]
-// CHECK-TLS3-NEXT: [[TMP7:%.*]] = call ptr @_ZTW3gs3(), !dbg [[DBG276:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG277:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG277]]
-// CHECK-TLS3-NEXT: [[TMP9:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG278:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], [[TMP8]], !dbg [[DBG278]]
-// CHECK-TLS3-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG278]]
-// CHECK-TLS3-NEXT: [[TMP10:%.*]] = call ptr @_ZTW5arr_x(), !dbg [[DBG279:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP10]], i64 0, i64 1, !dbg [[DBG279]]
-// CHECK-TLS3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG279]]
-// CHECK-TLS3-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG280:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP11:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG280]]
-// CHECK-TLS3-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG281:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG281]]
-// CHECK-TLS3-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG281]]
-// CHECK-TLS3-NEXT: [[TMP13:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIiE2stE), !dbg [[DBG282:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !dbg [[DBG282]]
-// CHECK-TLS3-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG283:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], [[TMP14]], !dbg [[DBG283]]
-// CHECK-TLS3-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG283]]
-// CHECK-TLS3-NEXT: [[TMP16:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIfE2stE), !dbg [[DBG284:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP17:%.*]] = load float, ptr [[TMP16]], align 4, !dbg [[DBG284]]
-// CHECK-TLS3-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG284]]
-// CHECK-TLS3-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG285:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG285]]
-// CHECK-TLS3-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG285]]
-// CHECK-TLS3-NEXT: [[TMP19:%.*]] = call ptr @_ZTWN2STI2S4E2stE(), !dbg [[DBG286:![0-9]+]]
-// CHECK-TLS3-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP19]], i32 0, i32 0, !dbg [[DBG287:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP20:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG287]]
-// CHECK-TLS3-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG288:![0-9]+]]
-// CHECK-TLS3-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP21]], [[TMP20]], !dbg [[DBG288]]
-// CHECK-TLS3-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG288]]
-// CHECK-TLS3-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG289:![0-9]+]]
-// CHECK-TLS3-NEXT: ret i32 [[TMP22]], !dbg [[DBG290:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META265:![0-9]+]], metadata !DIExpression()), !dbg [[DBG266:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = call ptr @_ZTWN6Static1sE(), !dbg [[DBG267:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP0]], i32 0, i32 0, !dbg [[DBG268:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG268]]
+// CHECK-TLS3-NEXT: store i32 [[TMP1]], ptr [[RES]], align 4, !dbg [[DBG269:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP2:%.*]] = call ptr @_ZTWL3gs1(), !dbg [[DBG270:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP2]], i32 0, i32 0, !dbg [[DBG271:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP3:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG271]]
+// CHECK-TLS3-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG272:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG272]]
+// CHECK-TLS3-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG272]]
+// CHECK-TLS3-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG273:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG274:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG274]]
+// CHECK-TLS3-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG274]]
+// CHECK-TLS3-NEXT: [[TMP7:%.*]] = call ptr @_ZTW3gs3(), !dbg [[DBG275:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG276:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP8:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG276]]
+// CHECK-TLS3-NEXT: [[TMP9:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG277:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP9]], [[TMP8]], !dbg [[DBG277]]
+// CHECK-TLS3-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG277]]
+// CHECK-TLS3-NEXT: [[TMP10:%.*]] = call ptr @_ZTW5arr_x(), !dbg [[DBG278:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP10]], i64 0, i64 1, !dbg [[DBG278]]
+// CHECK-TLS3-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG278]]
+// CHECK-TLS3-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG279:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP11:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG279]]
+// CHECK-TLS3-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG280:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG280]]
+// CHECK-TLS3-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG280]]
+// CHECK-TLS3-NEXT: [[TMP13:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIiE2stE), !dbg [[DBG281:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP14:%.*]] = load i32, ptr [[TMP13]], align 4, !dbg [[DBG281]]
+// CHECK-TLS3-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG282:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP15]], [[TMP14]], !dbg [[DBG282]]
+// CHECK-TLS3-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG282]]
+// CHECK-TLS3-NEXT: [[TMP16:%.*]] = call align 4 ptr @llvm.threadlocal.address.p0(ptr align 4 @_ZN2STIfE2stE), !dbg [[DBG283:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP17:%.*]] = load float, ptr [[TMP16]], align 4, !dbg [[DBG283]]
+// CHECK-TLS3-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG283]]
+// CHECK-TLS3-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG284:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG284]]
+// CHECK-TLS3-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG284]]
+// CHECK-TLS3-NEXT: [[TMP19:%.*]] = call ptr @_ZTWN2STI2S4E2stE(), !dbg [[DBG285:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP19]], i32 0, i32 0, !dbg [[DBG286:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP20:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG286]]
+// CHECK-TLS3-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG287:![0-9]+]]
+// CHECK-TLS3-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP21]], [[TMP20]], !dbg [[DBG287]]
+// CHECK-TLS3-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG287]]
+// CHECK-TLS3-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG288:![0-9]+]]
+// CHECK-TLS3-NEXT: ret i32 [[TMP22]], !dbg [[DBG289:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
-// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG291:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG290:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG292:![0-9]+]]
-// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG292]]
-// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG292]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG291:![0-9]+]]
+// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG291]]
+// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG291]]
// CHECK-TLS3: init.check:
-// CHECK-TLS3-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG292]]
-// CHECK-TLS3-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG293:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG292]]
-// CHECK-TLS3-NEXT: br label [[INIT_END]], !dbg [[DBG292]]
+// CHECK-TLS3-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG291]]
+// CHECK-TLS3-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG292:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG291]]
+// CHECK-TLS3-NEXT: br label [[INIT_END]], !dbg [[DBG291]]
// CHECK-TLS3: init.end:
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG295:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG294:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG296:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG295:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META297:![0-9]+]], metadata !DIExpression()), !dbg [[DBG299:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META296:![0-9]+]], metadata !DIExpression()), !dbg [[DBG298:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META300:![0-9]+]], metadata !DIExpression()), !dbg [[DBG301:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG300:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG302:![0-9]+]]
-// CHECK-TLS3-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG302]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG303:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG301:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG301]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG302:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG304:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG303:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META305:![0-9]+]], metadata !DIExpression()), !dbg [[DBG306:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META304:![0-9]+]], metadata !DIExpression()), !dbg [[DBG305:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG307:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG308:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG306:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG307:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG309:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG308:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META310:![0-9]+]], metadata !DIExpression()), !dbg [[DBG311:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META309:![0-9]+]], metadata !DIExpression()), !dbg [[DBG310:![0-9]+]]
// CHECK-TLS3-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META312:![0-9]+]], metadata !DIExpression()), !dbg [[DBG313:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META311:![0-9]+]], metadata !DIExpression()), !dbg [[DBG312:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG314:![0-9]+]]
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG315:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG314]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG316:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG313:![0-9]+]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG314:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG313]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG315:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG317:![0-9]+]] {
+// CHECK-TLS3-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG316:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
// CHECK-TLS3-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS3-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META318:![0-9]+]], metadata !DIExpression()), !dbg [[DBG319:![0-9]+]]
+// CHECK-TLS3-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META317:![0-9]+]], metadata !DIExpression()), !dbg [[DBG318:![0-9]+]]
// CHECK-TLS3-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG320:![0-9]+]]
-// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG322:![0-9]+]]
-// CHECK-TLS3-NEXT: ret void, !dbg [[DBG323:![0-9]+]]
+// CHECK-TLS3-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG319:![0-9]+]]
+// CHECK-TLS3-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG321:![0-9]+]]
+// CHECK-TLS3-NEXT: ret void, !dbg [[DBG322:![0-9]+]]
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG324:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG323:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
-// CHECK-TLS3-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG326:![0-9]+]]
+// CHECK-TLS3-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG325:![0-9]+]]
// CHECK-TLS3-NEXT: ret void
//
//
// CHECK-TLS3-LABEL: define {{[^@]+}}@__tls_init
-// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG327:![0-9]+]] {
+// CHECK-TLS3-SAME: () #[[ATTR0]] !dbg [[DBG326:![0-9]+]] {
// CHECK-TLS3-NEXT: entry:
-// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @__tls_guard, align 1, !dbg [[DBG328:![0-9]+]]
-// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG328]]
-// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !dbg [[DBG328]], !prof [[PROF207]]
+// CHECK-TLS3-NEXT: [[TMP0:%.*]] = load i8, ptr @__tls_guard, align 1, !dbg [[DBG327:![0-9]+]]
+// CHECK-TLS3-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG327]]
+// CHECK-TLS3-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !dbg [[DBG327]], !prof [[PROF206]]
// CHECK-TLS3: init:
-// CHECK-TLS3-NEXT: store i8 1, ptr @__tls_guard, align 1, !dbg [[DBG328]]
-// CHECK-TLS3-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG328]]
-// CHECK-TLS3-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG328]]
-// CHECK-TLS3-NEXT: br label [[EXIT]], !dbg [[DBG328]]
+// CHECK-TLS3-NEXT: store i8 1, ptr @__tls_guard, align 1, !dbg [[DBG327]]
+// CHECK-TLS3-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG327]]
+// CHECK-TLS3-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG327]]
+// CHECK-TLS3-NEXT: br label [[EXIT]], !dbg [[DBG327]]
// CHECK-TLS3: exit:
// CHECK-TLS3-NEXT: ret void
//
@@ -4936,7 +4936,7 @@ int foobar() {
// CHECK-TLS4-NEXT: [[RETVAL:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: [[RES:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META116:![0-9]+]], metadata !DIExpression()), !dbg [[DBG117:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META116:![0-9]+]], metadata !DIExpression()), !dbg [[DBG117:![0-9]+]]
// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVZ4mainE2sm, align 1, !dbg [[DBG118:![0-9]+]]
// CHECK-TLS4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG118]]
// CHECK-TLS4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG118]], !prof [[PROF119:![0-9]+]]
@@ -5017,9 +5017,9 @@ int foobar() {
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG152:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG152:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META153:![0-9]+]], metadata !DIExpression()), !dbg [[DBG154:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META153:![0-9]+]], metadata !DIExpression()), !dbg [[DBG154:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG155:![0-9]+]]
// CHECK-TLS4-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG155]]
@@ -5031,7 +5031,7 @@ int foobar() {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META158:![0-9]+]], metadata !DIExpression()), !dbg [[DBG159:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META158:![0-9]+]], metadata !DIExpression()), !dbg [[DBG159:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// CHECK-TLS4-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR5]], !dbg [[DBG160:![0-9]+]]
// CHECK-TLS4-NEXT: ret void, !dbg [[DBG161:![0-9]+]]
@@ -5086,10 +5086,10 @@ int foobar() {
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_Z6foobarv
-// CHECK-TLS4-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG162:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR3]] !dbg [[DBG162:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[RES:%.*]] = alloca i32, align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META163:![0-9]+]], metadata !DIExpression()), !dbg [[DBG164:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META163:![0-9]+]], metadata !DIExpression()), !dbg [[DBG164:![0-9]+]]
// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call ptr @_ZTWN6Static1sE(), !dbg [[DBG165:![0-9]+]]
// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP0]], i32 0, i32 0, !dbg [[DBG166:![0-9]+]]
// CHECK-TLS4-NEXT: [[TMP1:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG166]]
@@ -5140,354 +5140,354 @@ int foobar() {
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__cxx_global_var_init
-// CHECK-TLS4-SAME: () #[[ATTR7:[0-9]+]] !dbg [[DBG188:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG188:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
-// CHECK-TLS4-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG192:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG194:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG195:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG191:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG193:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG194:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S1C1Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG196:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG195:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META197:![0-9]+]], metadata !DIExpression()), !dbg [[DBG199:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META196:![0-9]+]], metadata !DIExpression()), !dbg [[DBG198:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META200:![0-9]+]], metadata !DIExpression()), !dbg [[DBG201:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META199:![0-9]+]], metadata !DIExpression()), !dbg [[DBG200:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG202:![0-9]+]]
-// CHECK-TLS4-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG202]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG203:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG201:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG201]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG202:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S1D1Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG204:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG203:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META205:![0-9]+]], metadata !DIExpression()), !dbg [[DBG206:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META204:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR5]], !dbg [[DBG207:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG208:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR5]], !dbg [[DBG206:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG207:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG209:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG208:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META210:![0-9]+]], metadata !DIExpression()), !dbg [[DBG211:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META209:![0-9]+]], metadata !DIExpression()), !dbg [[DBG210:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META212:![0-9]+]], metadata !DIExpression()), !dbg [[DBG213:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META211:![0-9]+]], metadata !DIExpression()), !dbg [[DBG212:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG214:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG215:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG214]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG216:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG213:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG214:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG213]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG215:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG217:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG216:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META218:![0-9]+]], metadata !DIExpression()), !dbg [[DBG219:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META217:![0-9]+]], metadata !DIExpression()), !dbg [[DBG218:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG220:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG222:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG223:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG219:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG221:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG222:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// CHECK-TLS4-SAME: () #[[ATTR7]] !dbg [[DBG224:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6]] !dbg [[DBG223:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
-// CHECK-TLS4-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG225:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG227:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG228:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG224:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG226:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG227:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG229:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG228:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META230:![0-9]+]], metadata !DIExpression()), !dbg [[DBG232:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META229:![0-9]+]], metadata !DIExpression()), !dbg [[DBG231:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META233:![0-9]+]], metadata !DIExpression()), !dbg [[DBG234:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META232:![0-9]+]], metadata !DIExpression()), !dbg [[DBG233:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG235:![0-9]+]]
-// CHECK-TLS4-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG235]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG236:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG234:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG234]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG235:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG237:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG236:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META238:![0-9]+]], metadata !DIExpression()), !dbg [[DBG239:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META237:![0-9]+]], metadata !DIExpression()), !dbg [[DBG238:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR5]], !dbg [[DBG240:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG241:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR5]], !dbg [[DBG239:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG240:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG242:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG241:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META243:![0-9]+]], metadata !DIExpression()), !dbg [[DBG244:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META242:![0-9]+]], metadata !DIExpression()), !dbg [[DBG243:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META245:![0-9]+]], metadata !DIExpression()), !dbg [[DBG246:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META244:![0-9]+]], metadata !DIExpression()), !dbg [[DBG245:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG247:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG248:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG247]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG249:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG246:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG247:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG246]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG248:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG250:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG249:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META251:![0-9]+]], metadata !DIExpression()), !dbg [[DBG252:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META250:![0-9]+]], metadata !DIExpression()), !dbg [[DBG251:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG253:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG255:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG256:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG252:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG254:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG255:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// CHECK-TLS4-SAME: () #[[ATTR7]] personality ptr @__gxx_personality_v0 !dbg [[DBG257:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6]] personality ptr @__gxx_personality_v0 !dbg [[DBG256:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// CHECK-TLS4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG258:![0-9]+]]
-// CHECK-TLS4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG260:![0-9]+]]
+// CHECK-TLS4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG257:![0-9]+]]
+// CHECK-TLS4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG259:![0-9]+]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG261:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG260:![0-9]+]]
// CHECK-TLS4: invoke.cont:
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG260]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG259]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG262:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG261:![0-9]+]]
// CHECK-TLS4: invoke.cont2:
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG260]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG259]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG263:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG262:![0-9]+]]
// CHECK-TLS4: invoke.cont3:
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG264:![0-9]+]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG263:![0-9]+]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG265:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG264:![0-9]+]]
// CHECK-TLS4: invoke.cont7:
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG264]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG263]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG266:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG265:![0-9]+]]
// CHECK-TLS4: invoke.cont8:
-// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG264]]
+// CHECK-TLS4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG263]]
// CHECK-TLS4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// CHECK-TLS4-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG267:![0-9]+]]
+// CHECK-TLS4-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG266:![0-9]+]]
// CHECK-TLS4: invoke.cont9:
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG268:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_thread_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG267:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG267]]
// CHECK-TLS4: lpad:
// CHECK-TLS4-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS4-NEXT: cleanup, !dbg [[DBG269:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG260]]
+// CHECK-TLS4-NEXT: cleanup, !dbg [[DBG268:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG259]]
// CHECK-TLS4: arraydestroy.body:
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]], !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG260]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG260]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]], !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG259]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG259]]
// CHECK-TLS4: arraydestroy.done4:
-// CHECK-TLS4-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG260]]
+// CHECK-TLS4-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG259]]
// CHECK-TLS4: lpad6:
// CHECK-TLS4-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// CHECK-TLS4-NEXT: cleanup, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG269]]
-// CHECK-TLS4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG264]]
+// CHECK-TLS4-NEXT: cleanup, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG268]]
+// CHECK-TLS4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG263]]
// CHECK-TLS4: arraydestroy.body11:
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR5]], !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG264]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG264]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR5]], !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG263]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG263]]
// CHECK-TLS4: arraydestroy.done15:
-// CHECK-TLS4-NEXT: br label [[EHCLEANUP]], !dbg [[DBG264]]
+// CHECK-TLS4-NEXT: br label [[EHCLEANUP]], !dbg [[DBG263]]
// CHECK-TLS4: ehcleanup:
-// CHECK-TLS4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG258]]
+// CHECK-TLS4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG257]]
// CHECK-TLS4: arraydestroy.body17:
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR5]], !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG258]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR5]], !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG257]]
// CHECK-TLS4: arraydestroy.done21:
-// CHECK-TLS4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG258]]
+// CHECK-TLS4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG257]]
// CHECK-TLS4: eh.resume:
-// CHECK-TLS4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG258]]
-// CHECK-TLS4-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG258]]
+// CHECK-TLS4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG257]]
+// CHECK-TLS4-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG257]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// CHECK-TLS4-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR7]] !dbg [[DBG270:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR6]] !dbg [[DBG269:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META274:![0-9]+]], metadata !DIExpression()), !dbg [[DBG275:![0-9]+]]
-// CHECK-TLS4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG275]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META273:![0-9]+]], metadata !DIExpression()), !dbg [[DBG274:![0-9]+]]
+// CHECK-TLS4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG274]]
// CHECK-TLS4: arraydestroy.body:
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG275]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG275]]
-// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]], !dbg [[DBG275]]
-// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG275]]
-// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG275]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG274]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG274]]
+// CHECK-TLS4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR5]], !dbg [[DBG274]]
+// CHECK-TLS4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG274]]
+// CHECK-TLS4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG274]]
// CHECK-TLS4: arraydestroy.done1:
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG275]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG274]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] align 2 !dbg [[DBG276:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] align 2 !dbg [[DBG275:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META277:![0-9]+]], metadata !DIExpression()), !dbg [[DBG278:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META276:![0-9]+]], metadata !DIExpression()), !dbg [[DBG277:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META279:![0-9]+]], metadata !DIExpression()), !dbg [[DBG280:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META278:![0-9]+]], metadata !DIExpression()), !dbg [[DBG279:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG281:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG282:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG281]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG283:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG280:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG281:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG280]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG282:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] align 2 !dbg [[DBG284:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] align 2 !dbg [[DBG283:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META285:![0-9]+]], metadata !DIExpression()), !dbg [[DBG286:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META284:![0-9]+]], metadata !DIExpression()), !dbg [[DBG285:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG287:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG289:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG290:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG286:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG288:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG289:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
-// CHECK-TLS4-SAME: () #[[ATTR7]] !dbg [[DBG291:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6]] !dbg [[DBG290:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG292:![0-9]+]]
-// CHECK-TLS4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG292]]
-// CHECK-TLS4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG292]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG291:![0-9]+]]
+// CHECK-TLS4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG291]]
+// CHECK-TLS4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG291]]
// CHECK-TLS4: init.check:
-// CHECK-TLS4-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG292]]
-// CHECK-TLS4-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG293:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG292]]
-// CHECK-TLS4-NEXT: br label [[INIT_END]], !dbg [[DBG292]]
+// CHECK-TLS4-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG291]]
+// CHECK-TLS4-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG292:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_thread_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR5]], !dbg [[DBG291]]
+// CHECK-TLS4-NEXT: br label [[INIT_END]], !dbg [[DBG291]]
// CHECK-TLS4: init.end:
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG295:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG294:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG296:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG295:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META297:![0-9]+]], metadata !DIExpression()), !dbg [[DBG299:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META296:![0-9]+]], metadata !DIExpression()), !dbg [[DBG298:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META300:![0-9]+]], metadata !DIExpression()), !dbg [[DBG301:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG300:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG302:![0-9]+]]
-// CHECK-TLS4-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG302]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG303:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG301:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG301]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG302:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG304:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG303:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META305:![0-9]+]], metadata !DIExpression()), !dbg [[DBG306:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META304:![0-9]+]], metadata !DIExpression()), !dbg [[DBG305:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR5]], !dbg [[DBG307:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG308:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR5]], !dbg [[DBG306:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG307:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG309:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG308:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META310:![0-9]+]], metadata !DIExpression()), !dbg [[DBG311:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META309:![0-9]+]], metadata !DIExpression()), !dbg [[DBG310:![0-9]+]]
// CHECK-TLS4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META312:![0-9]+]], metadata !DIExpression()), !dbg [[DBG313:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META311:![0-9]+]], metadata !DIExpression()), !dbg [[DBG312:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG314:![0-9]+]]
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG315:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG314]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG316:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG313:![0-9]+]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG314:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG313]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG315:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG317:![0-9]+]] {
+// CHECK-TLS4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR4]] comdat align 2 !dbg [[DBG316:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
// CHECK-TLS4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// CHECK-TLS4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META318:![0-9]+]], metadata !DIExpression()), !dbg [[DBG319:![0-9]+]]
+// CHECK-TLS4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META317:![0-9]+]], metadata !DIExpression()), !dbg [[DBG318:![0-9]+]]
// CHECK-TLS4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG320:![0-9]+]]
-// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG322:![0-9]+]]
-// CHECK-TLS4-NEXT: ret void, !dbg [[DBG323:![0-9]+]]
+// CHECK-TLS4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG319:![0-9]+]]
+// CHECK-TLS4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG321:![0-9]+]]
+// CHECK-TLS4-NEXT: ret void, !dbg [[DBG322:![0-9]+]]
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// CHECK-TLS4-SAME: () #[[ATTR7]] !dbg [[DBG324:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6]] !dbg [[DBG323:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
-// CHECK-TLS4-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG326:![0-9]+]]
+// CHECK-TLS4-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG325:![0-9]+]]
// CHECK-TLS4-NEXT: ret void
//
//
// CHECK-TLS4-LABEL: define {{[^@]+}}@__tls_init
-// CHECK-TLS4-SAME: () #[[ATTR7]] !dbg [[DBG327:![0-9]+]] {
+// CHECK-TLS4-SAME: () #[[ATTR6]] !dbg [[DBG326:![0-9]+]] {
// CHECK-TLS4-NEXT: entry:
-// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i8, ptr @__tls_guard, align 1, !dbg [[DBG328:![0-9]+]]
-// CHECK-TLS4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG328]]
-// CHECK-TLS4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !dbg [[DBG328]], !prof [[PROF119]]
+// CHECK-TLS4-NEXT: [[TMP0:%.*]] = load i8, ptr @__tls_guard, align 1, !dbg [[DBG327:![0-9]+]]
+// CHECK-TLS4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG327]]
+// CHECK-TLS4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT:%.*]], label [[EXIT:%.*]], !dbg [[DBG327]], !prof [[PROF119]]
// CHECK-TLS4: init:
-// CHECK-TLS4-NEXT: store i8 1, ptr @__tls_guard, align 1, !dbg [[DBG328]]
-// CHECK-TLS4-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG328]]
-// CHECK-TLS4-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG328]]
-// CHECK-TLS4-NEXT: br label [[EXIT]], !dbg [[DBG328]]
+// CHECK-TLS4-NEXT: store i8 1, ptr @__tls_guard, align 1, !dbg [[DBG327]]
+// CHECK-TLS4-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG327]]
+// CHECK-TLS4-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG327]]
+// CHECK-TLS4-NEXT: br label [[EXIT]], !dbg [[DBG327]]
// CHECK-TLS4: exit:
// CHECK-TLS4-NEXT: ret void
//
@@ -5565,34 +5565,34 @@ int foobar() {
// SIMD3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8
// SIMD3-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// SIMD3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// SIMD3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// SIMD3: invoke.cont:
// SIMD3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// SIMD3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
+// SIMD3-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]]
// SIMD3: invoke.cont2:
// SIMD3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// SIMD3-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
+// SIMD3-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]]
// SIMD3: invoke.cont3:
// SIMD3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8
// SIMD3-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// SIMD3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
+// SIMD3-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]]
// SIMD3: invoke.cont7:
// SIMD3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// SIMD3-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
+// SIMD3-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]]
// SIMD3: invoke.cont8:
// SIMD3-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8
// SIMD3-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// SIMD3-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
+// SIMD3-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]]
// SIMD3: invoke.cont9:
// SIMD3-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]]
// SIMD3-NEXT: ret void
// SIMD3: lpad:
// SIMD3-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// SIMD3-NEXT: cleanup
+// SIMD3-NEXT: cleanup
// SIMD3-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0
// SIMD3-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8
// SIMD3-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1
@@ -5610,7 +5610,7 @@ int foobar() {
// SIMD3-NEXT: br label [[EHCLEANUP:%.*]]
// SIMD3: lpad6:
// SIMD3-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// SIMD3-NEXT: cleanup
+// SIMD3-NEXT: cleanup
// SIMD3-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0
// SIMD3-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8
// SIMD3-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1
@@ -5681,7 +5681,7 @@ int foobar() {
// SIMD3: init:
// SIMD3-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4
// SIMD3-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]])
-// SIMD3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
+// SIMD3-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]]
// SIMD3: invoke.cont:
// SIMD3-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]]
// SIMD3-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]]
@@ -5726,7 +5726,7 @@ int foobar() {
// SIMD3-NEXT: ret i32 [[TMP21]]
// SIMD3: lpad:
// SIMD3-NEXT: [[TMP22:%.*]] = landingpad { ptr, i32 }
-// SIMD3-NEXT: cleanup
+// SIMD3-NEXT: cleanup
// SIMD3-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0
// SIMD3-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8
// SIMD3-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1
@@ -5765,7 +5765,7 @@ int foobar() {
//
//
// SIMD3-LABEL: define {{[^@]+}}@_Z6foobarv
-// SIMD3-SAME: () #[[ATTR5:[0-9]+]] {
+// SIMD3-SAME: () #[[ATTR2]] {
// SIMD3-NEXT: entry:
// SIMD3-NEXT: [[RES:%.*]] = alloca i32, align 4
// SIMD3-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4
@@ -5953,179 +5953,179 @@ int foobar() {
// SIMD4-LABEL: define {{[^@]+}}@__cxx_global_var_init
// SIMD4-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG115:![0-9]+]] {
// SIMD4-NEXT: entry:
-// SIMD4-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG119:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG121:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG122:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG118:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR3:[0-9]+]], !dbg [[DBG120:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG121:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S1C1Ei
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG123:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1:[0-9]+]] comdat align 2 !dbg [[DBG122:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG126:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META123:![0-9]+]], metadata !DIExpression()), !dbg [[DBG125:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META126:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG129:![0-9]+]]
-// SIMD4-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG129]]
-// SIMD4-NEXT: ret void, !dbg [[DBG130:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG128:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG128]]
+// SIMD4-NEXT: ret void, !dbg [[DBG129:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S1D1Ev
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG131:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG130:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META131:![0-9]+]], metadata !DIExpression()), !dbg [[DBG132:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG134:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG135:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR3]], !dbg [[DBG133:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG134:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// SIMD4-SAME: () #[[ATTR0]] !dbg [[DBG136:![0-9]+]] {
+// SIMD4-SAME: () #[[ATTR0]] !dbg [[DBG135:![0-9]+]] {
// SIMD4-NEXT: entry:
-// SIMD4-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG137:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG139:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG140:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG136:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG138:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG139:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG141:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG140:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META142:![0-9]+]], metadata !DIExpression()), !dbg [[DBG144:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META141:![0-9]+]], metadata !DIExpression()), !dbg [[DBG143:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META145:![0-9]+]], metadata !DIExpression()), !dbg [[DBG146:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG145:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG147:![0-9]+]]
-// SIMD4-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG147]]
-// SIMD4-NEXT: ret void, !dbg [[DBG148:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG146:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG146]]
+// SIMD4-NEXT: ret void, !dbg [[DBG147:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG149:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG148:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META150:![0-9]+]], metadata !DIExpression()), !dbg [[DBG151:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META149:![0-9]+]], metadata !DIExpression()), !dbg [[DBG150:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG152:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG153:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR3]], !dbg [[DBG151:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG152:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// SIMD4-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG154:![0-9]+]] {
+// SIMD4-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG153:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// SIMD4-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// SIMD4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155:![0-9]+]]
-// SIMD4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157:![0-9]+]]
+// SIMD4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154:![0-9]+]]
+// SIMD4-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156:![0-9]+]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// SIMD4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG158:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG157:![0-9]+]]
// SIMD4: invoke.cont:
-// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// SIMD4-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG159:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG158:![0-9]+]]
// SIMD4: invoke.cont2:
-// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// SIMD4-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG160:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG159:![0-9]+]]
// SIMD4: invoke.cont3:
-// SIMD4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155]]
-// SIMD4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161:![0-9]+]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160:![0-9]+]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// SIMD4-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG162:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG161:![0-9]+]]
// SIMD4: invoke.cont7:
-// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// SIMD4-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG163:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG162:![0-9]+]]
// SIMD4: invoke.cont8:
-// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
+// SIMD4-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
// SIMD4-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// SIMD4-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG164:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG163:![0-9]+]]
// SIMD4: invoke.cont9:
-// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG165:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG164:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG164]]
// SIMD4: lpad:
// SIMD4-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// SIMD4-NEXT: cleanup, !dbg [[DBG166:![0-9]+]]
-// SIMD4-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG166]]
-// SIMD4-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG166]]
-// SIMD4-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG166]]
-// SIMD4-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG166]]
-// SIMD4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG157]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG157]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG157]]
+// SIMD4-NEXT: cleanup, !dbg [[DBG165:![0-9]+]]
+// SIMD4-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG165]]
+// SIMD4-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG165]]
+// SIMD4-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG156]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG156]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG156]]
// SIMD4: arraydestroy.body:
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG157]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG157]]
-// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG157]]
-// SIMD4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG157]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG157]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG156]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG156]]
+// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG156]]
+// SIMD4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG156]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG156]]
// SIMD4: arraydestroy.done4:
-// SIMD4-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG157]]
+// SIMD4-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG156]]
// SIMD4: lpad6:
// SIMD4-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// SIMD4-NEXT: cleanup, !dbg [[DBG166]]
-// SIMD4-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG166]]
-// SIMD4-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG166]]
-// SIMD4-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG166]]
-// SIMD4-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG166]]
-// SIMD4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG161]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG161]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG161]]
+// SIMD4-NEXT: cleanup, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG165]]
+// SIMD4-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG165]]
+// SIMD4-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG165]]
+// SIMD4-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG160]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG160]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG160]]
// SIMD4: arraydestroy.body11:
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG161]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG161]]
-// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG161]]
-// SIMD4-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG161]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG161]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG160]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG160]]
+// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR3]], !dbg [[DBG160]]
+// SIMD4-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG160]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG160]]
// SIMD4: arraydestroy.done15:
-// SIMD4-NEXT: br label [[EHCLEANUP]], !dbg [[DBG161]]
+// SIMD4-NEXT: br label [[EHCLEANUP]], !dbg [[DBG160]]
// SIMD4: ehcleanup:
-// SIMD4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG155]]
-// SIMD4-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG155]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG155]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG155]]
+// SIMD4-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG154]]
+// SIMD4-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG154]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG154]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG154]]
// SIMD4: arraydestroy.body17:
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG155]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG155]]
-// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG155]]
-// SIMD4-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG155]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG155]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG154]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG154]]
+// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR3]], !dbg [[DBG154]]
+// SIMD4-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG154]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG154]]
// SIMD4: arraydestroy.done21:
-// SIMD4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG155]]
+// SIMD4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG154]]
// SIMD4: eh.resume:
-// SIMD4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG155]]
-// SIMD4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG155]]
-// SIMD4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG155]]
-// SIMD4-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG155]]
-// SIMD4-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG155]]
+// SIMD4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG154]]
+// SIMD4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG154]]
+// SIMD4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG154]]
+// SIMD4-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG154]]
+// SIMD4-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG154]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// SIMD4-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG167:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG166:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG172:![0-9]+]]
-// SIMD4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG172]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META170:![0-9]+]], metadata !DIExpression()), !dbg [[DBG171:![0-9]+]]
+// SIMD4-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG171]]
// SIMD4: arraydestroy.body:
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG172]]
-// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG172]]
-// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG172]]
-// SIMD4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG172]]
-// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG172]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG171]]
+// SIMD4-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG171]]
+// SIMD4-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR3]], !dbg [[DBG171]]
+// SIMD4-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG171]]
+// SIMD4-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG171]]
// SIMD4: arraydestroy.done1:
-// SIMD4-NEXT: ret void, !dbg [[DBG172]]
+// SIMD4-NEXT: ret void, !dbg [[DBG171]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@main
@@ -6136,302 +6136,302 @@ int foobar() {
// SIMD4-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META173:![0-9]+]], metadata !DIExpression()), !dbg [[DBG174:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG175:![0-9]+]]
-// SIMD4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG175]]
-// SIMD4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG175]], !prof [[PROF176:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META172:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG174:![0-9]+]]
+// SIMD4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG174]]
+// SIMD4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG174]], !prof [[PROF175:![0-9]+]]
// SIMD4: init.check:
-// SIMD4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG175]]
-// SIMD4-NEXT: br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]], !dbg [[DBG175]]
+// SIMD4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_guard_acquire(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD4-NEXT: [[TOBOOL:%.*]] = icmp ne i32 [[TMP1]], 0, !dbg [[DBG174]]
+// SIMD4-NEXT: br i1 [[TOBOOL]], label [[INIT:%.*]], label [[INIT_END]], !dbg [[DBG174]]
// SIMD4: init:
-// SIMD4-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG177:![0-9]+]]
+// SIMD4-NEXT: [[TMP2:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG176:![0-9]+]]
// SIMD4-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP2]])
-// SIMD4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG178:![0-9]+]]
+// SIMD4-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG177:![0-9]+]]
// SIMD4: invoke.cont:
-// SIMD4-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD4-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD4-NEXT: br label [[INIT_END]], !dbg [[DBG175]]
+// SIMD4-NEXT: [[TMP3:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD4-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD4-NEXT: br label [[INIT_END]], !dbg [[DBG174]]
// SIMD4: init.end:
-// SIMD4-NEXT: [[TMP4:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG179:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP4]], ptr [[RES]], align 4, !dbg [[DBG180:![0-9]+]]
-// SIMD4-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZZ4mainE2sm, align 8, !dbg [[DBG181:![0-9]+]]
-// SIMD4-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG182:![0-9]+]]
-// SIMD4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG182]]
-// SIMD4-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG182]]
-// SIMD4-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG183:![0-9]+]]
-// SIMD4-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG184:![0-9]+]]
-// SIMD4-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG184]]
-// SIMD4-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG184]]
-// SIMD4-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG185:![0-9]+]]
-// SIMD4-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG186:![0-9]+]]
-// SIMD4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG186]]
-// SIMD4-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG186]]
-// SIMD4-NEXT: [[TMP11:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG187:![0-9]+]]
-// SIMD4-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG188:![0-9]+]]
-// SIMD4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG188]]
-// SIMD4-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG188]]
-// SIMD4-NEXT: [[TMP13:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG189:![0-9]+]]
-// SIMD4-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG190:![0-9]+]]
-// SIMD4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG190]]
-// SIMD4-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG190]]
-// SIMD4-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG191:![0-9]+]]
-// SIMD4-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG192:![0-9]+]]
-// SIMD4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG192]]
-// SIMD4-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG192]]
-// SIMD4-NEXT: [[TMP17:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG193:![0-9]+]]
-// SIMD4-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG193]]
-// SIMD4-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG194:![0-9]+]]
-// SIMD4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG194]]
-// SIMD4-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG194]]
-// SIMD4-NEXT: [[TMP19:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG195:![0-9]+]]
-// SIMD4-NEXT: [[TMP20:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG196:![0-9]+]]
-// SIMD4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], [[TMP19]], !dbg [[DBG196]]
-// SIMD4-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG196]]
-// SIMD4-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG197:![0-9]+]]
-// SIMD4-NEXT: ret i32 [[TMP21]], !dbg [[DBG198:![0-9]+]]
+// SIMD4-NEXT: [[TMP4:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG178:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP4]], ptr [[RES]], align 4, !dbg [[DBG179:![0-9]+]]
+// SIMD4-NEXT: [[TMP5:%.*]] = load i32, ptr @_ZZ4mainE2sm, align 8, !dbg [[DBG180:![0-9]+]]
+// SIMD4-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG181:![0-9]+]]
+// SIMD4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG181]]
+// SIMD4-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG181]]
+// SIMD4-NEXT: [[TMP7:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG182:![0-9]+]]
+// SIMD4-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG183:![0-9]+]]
+// SIMD4-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG183]]
+// SIMD4-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG183]]
+// SIMD4-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG184:![0-9]+]]
+// SIMD4-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG185:![0-9]+]]
+// SIMD4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG185]]
+// SIMD4-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG185]]
+// SIMD4-NEXT: [[TMP11:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG186:![0-9]+]]
+// SIMD4-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG187:![0-9]+]]
+// SIMD4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP12]], [[TMP11]], !dbg [[DBG187]]
+// SIMD4-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG187]]
+// SIMD4-NEXT: [[TMP13:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG188:![0-9]+]]
+// SIMD4-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG189:![0-9]+]]
+// SIMD4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG189]]
+// SIMD4-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG189]]
+// SIMD4-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG190:![0-9]+]]
+// SIMD4-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG191:![0-9]+]]
+// SIMD4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG191]]
+// SIMD4-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG191]]
+// SIMD4-NEXT: [[TMP17:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG192:![0-9]+]]
+// SIMD4-NEXT: [[CONV:%.*]] = fptosi float [[TMP17]] to i32, !dbg [[DBG192]]
+// SIMD4-NEXT: [[TMP18:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG193:![0-9]+]]
+// SIMD4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP18]], [[CONV]], !dbg [[DBG193]]
+// SIMD4-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG193]]
+// SIMD4-NEXT: [[TMP19:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG194:![0-9]+]]
+// SIMD4-NEXT: [[TMP20:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG195:![0-9]+]]
+// SIMD4-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP20]], [[TMP19]], !dbg [[DBG195]]
+// SIMD4-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG195]]
+// SIMD4-NEXT: [[TMP21:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG196:![0-9]+]]
+// SIMD4-NEXT: ret i32 [[TMP21]], !dbg [[DBG197:![0-9]+]]
// SIMD4: lpad:
// SIMD4-NEXT: [[TMP22:%.*]] = landingpad { ptr, i32 }
-// SIMD4-NEXT: cleanup, !dbg [[DBG199:![0-9]+]]
-// SIMD4-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0, !dbg [[DBG199]]
-// SIMD4-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG199]]
-// SIMD4-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1, !dbg [[DBG199]]
-// SIMD4-NEXT: store i32 [[TMP24]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG199]]
-// SIMD4-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG175]]
-// SIMD4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG175]]
+// SIMD4-NEXT: cleanup, !dbg [[DBG198:![0-9]+]]
+// SIMD4-NEXT: [[TMP23:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 0, !dbg [[DBG198]]
+// SIMD4-NEXT: store ptr [[TMP23]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG198]]
+// SIMD4-NEXT: [[TMP24:%.*]] = extractvalue { ptr, i32 } [[TMP22]], 1, !dbg [[DBG198]]
+// SIMD4-NEXT: store i32 [[TMP24]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG198]]
+// SIMD4-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR3]], !dbg [[DBG174]]
+// SIMD4-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG174]]
// SIMD4: eh.resume:
-// SIMD4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG175]]
-// SIMD4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG175]]
-// SIMD4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG175]]
-// SIMD4-NEXT: [[LPAD_VAL8:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG175]]
-// SIMD4-NEXT: resume { ptr, i32 } [[LPAD_VAL8]], !dbg [[DBG175]]
+// SIMD4-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG174]]
+// SIMD4-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG174]]
+// SIMD4-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG174]]
+// SIMD4-NEXT: [[LPAD_VAL8:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG174]]
+// SIMD4-NEXT: resume { ptr, i32 } [[LPAD_VAL8]], !dbg [[DBG174]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC1Ei
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG200:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] align 2 !dbg [[DBG199:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG203:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META200:![0-9]+]], metadata !DIExpression()), !dbg [[DBG202:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META204:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META203:![0-9]+]], metadata !DIExpression()), !dbg [[DBG204:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG206:![0-9]+]]
-// SIMD4-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG206]]
-// SIMD4-NEXT: ret void, !dbg [[DBG207:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG205:![0-9]+]]
+// SIMD4-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG205]]
+// SIMD4-NEXT: ret void, !dbg [[DBG206:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD1Ev
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG208:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG207:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META209:![0-9]+]], metadata !DIExpression()), !dbg [[DBG210:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META208:![0-9]+]], metadata !DIExpression()), !dbg [[DBG209:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG211:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG212:![0-9]+]]
+// SIMD4-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR3]], !dbg [[DBG210:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG211:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_Z6foobarv
-// SIMD4-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG213:![0-9]+]] {
+// SIMD4-SAME: () #[[ATTR2]] !dbg [[DBG212:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[RES:%.*]] = alloca i32, align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META214:![0-9]+]], metadata !DIExpression()), !dbg [[DBG215:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG216:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP0]], ptr [[RES]], align 4, !dbg [[DBG217:![0-9]+]]
-// SIMD4-NEXT: [[TMP1:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG218:![0-9]+]]
-// SIMD4-NEXT: [[TMP2:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG219:![0-9]+]]
-// SIMD4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[TMP1]], !dbg [[DBG219]]
-// SIMD4-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG219]]
-// SIMD4-NEXT: [[TMP3:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG220:![0-9]+]]
-// SIMD4-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG221:![0-9]+]]
-// SIMD4-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG221]]
-// SIMD4-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG221]]
-// SIMD4-NEXT: [[TMP5:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG222:![0-9]+]]
-// SIMD4-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG223:![0-9]+]]
-// SIMD4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG223]]
-// SIMD4-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG223]]
-// SIMD4-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG224:![0-9]+]]
-// SIMD4-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG225:![0-9]+]]
-// SIMD4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG225]]
-// SIMD4-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG225]]
-// SIMD4-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG226:![0-9]+]]
-// SIMD4-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG227:![0-9]+]]
-// SIMD4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG227]]
-// SIMD4-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG227]]
-// SIMD4-NEXT: [[TMP11:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG228:![0-9]+]]
-// SIMD4-NEXT: [[CONV:%.*]] = fptosi float [[TMP11]] to i32, !dbg [[DBG228]]
-// SIMD4-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG229:![0-9]+]]
-// SIMD4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], [[CONV]], !dbg [[DBG229]]
-// SIMD4-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG229]]
-// SIMD4-NEXT: [[TMP13:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG230:![0-9]+]]
-// SIMD4-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
-// SIMD4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG231]]
-// SIMD4-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG231]]
-// SIMD4-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG232:![0-9]+]]
-// SIMD4-NEXT: ret i32 [[TMP15]], !dbg [[DBG233:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META213:![0-9]+]], metadata !DIExpression()), !dbg [[DBG214:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr @_ZN6Static1sE, align 4, !dbg [[DBG215:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP0]], ptr [[RES]], align 4, !dbg [[DBG216:![0-9]+]]
+// SIMD4-NEXT: [[TMP1:%.*]] = load i32, ptr @_ZL3gs1, align 4, !dbg [[DBG217:![0-9]+]]
+// SIMD4-NEXT: [[TMP2:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG218:![0-9]+]]
+// SIMD4-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP2]], [[TMP1]], !dbg [[DBG218]]
+// SIMD4-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG218]]
+// SIMD4-NEXT: [[TMP3:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG219:![0-9]+]]
+// SIMD4-NEXT: [[TMP4:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG220:![0-9]+]]
+// SIMD4-NEXT: [[ADD1:%.*]] = add nsw i32 [[TMP4]], [[TMP3]], !dbg [[DBG220]]
+// SIMD4-NEXT: store i32 [[ADD1]], ptr [[RES]], align 4, !dbg [[DBG220]]
+// SIMD4-NEXT: [[TMP5:%.*]] = load i32, ptr @gs3, align 4, !dbg [[DBG221:![0-9]+]]
+// SIMD4-NEXT: [[TMP6:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG222:![0-9]+]]
+// SIMD4-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP6]], [[TMP5]], !dbg [[DBG222]]
+// SIMD4-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG222]]
+// SIMD4-NEXT: [[TMP7:%.*]] = load i32, ptr getelementptr inbounds ([2 x [3 x %struct.S1]], ptr @arr_x, i64 0, i64 1, i64 1), align 4, !dbg [[DBG223:![0-9]+]]
+// SIMD4-NEXT: [[TMP8:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG224:![0-9]+]]
+// SIMD4-NEXT: [[ADD3:%.*]] = add nsw i32 [[TMP8]], [[TMP7]], !dbg [[DBG224]]
+// SIMD4-NEXT: store i32 [[ADD3]], ptr [[RES]], align 4, !dbg [[DBG224]]
+// SIMD4-NEXT: [[TMP9:%.*]] = load i32, ptr @_ZN2STIiE2stE, align 4, !dbg [[DBG225:![0-9]+]]
+// SIMD4-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG226:![0-9]+]]
+// SIMD4-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG226]]
+// SIMD4-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG226]]
+// SIMD4-NEXT: [[TMP11:%.*]] = load float, ptr @_ZN2STIfE2stE, align 4, !dbg [[DBG227:![0-9]+]]
+// SIMD4-NEXT: [[CONV:%.*]] = fptosi float [[TMP11]] to i32, !dbg [[DBG227]]
+// SIMD4-NEXT: [[TMP12:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG228:![0-9]+]]
+// SIMD4-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP12]], [[CONV]], !dbg [[DBG228]]
+// SIMD4-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG228]]
+// SIMD4-NEXT: [[TMP13:%.*]] = load i32, ptr @_ZN2STI2S4E2stE, align 4, !dbg [[DBG229:![0-9]+]]
+// SIMD4-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG230:![0-9]+]]
+// SIMD4-NEXT: [[ADD6:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG230]]
+// SIMD4-NEXT: store i32 [[ADD6]], ptr [[RES]], align 4, !dbg [[DBG230]]
+// SIMD4-NEXT: [[TMP15:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
+// SIMD4-NEXT: ret i32 [[TMP15]], !dbg [[DBG232:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@__cxx_global_var_init.3
-// SIMD4-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG234:![0-9]+]] {
+// SIMD4-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG233:![0-9]+]] {
// SIMD4-NEXT: entry:
-// SIMD4-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG235:![0-9]+]]
-// SIMD4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG235]]
-// SIMD4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG235]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG234:![0-9]+]]
+// SIMD4-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG234]]
+// SIMD4-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG234]]
// SIMD4: init.check:
-// SIMD4-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG235]]
-// SIMD4-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG236:![0-9]+]]
-// SIMD4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG235]]
-// SIMD4-NEXT: br label [[INIT_END]], !dbg [[DBG235]]
+// SIMD4-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG234]]
+// SIMD4-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG235:![0-9]+]]
+// SIMD4-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR3]], !dbg [[DBG234]]
+// SIMD4-NEXT: br label [[INIT_END]], !dbg [[DBG234]]
// SIMD4: init.end:
-// SIMD4-NEXT: ret void, !dbg [[DBG238:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG237:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG239:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR1]] comdat align 2 !dbg [[DBG238:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META240:![0-9]+]], metadata !DIExpression()), !dbg [[DBG242:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META239:![0-9]+]], metadata !DIExpression()), !dbg [[DBG241:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META243:![0-9]+]], metadata !DIExpression()), !dbg [[DBG244:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META242:![0-9]+]], metadata !DIExpression()), !dbg [[DBG243:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG245:![0-9]+]]
-// SIMD4-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG245]]
-// SIMD4-NEXT: ret void, !dbg [[DBG246:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG244:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG244]]
+// SIMD4-NEXT: ret void, !dbg [[DBG245:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG247:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG246:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META248:![0-9]+]], metadata !DIExpression()), !dbg [[DBG249:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META247:![0-9]+]], metadata !DIExpression()), !dbg [[DBG248:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG250:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG251:![0-9]+]]
+// SIMD4-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR3]], !dbg [[DBG249:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG250:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG252:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG251:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META253:![0-9]+]], metadata !DIExpression()), !dbg [[DBG254:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META252:![0-9]+]], metadata !DIExpression()), !dbg [[DBG253:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META255:![0-9]+]], metadata !DIExpression()), !dbg [[DBG256:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META254:![0-9]+]], metadata !DIExpression()), !dbg [[DBG255:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG257:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG258:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG257]]
-// SIMD4-NEXT: ret void, !dbg [[DBG259:![0-9]+]]
+// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG256:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG257:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG256]]
+// SIMD4-NEXT: ret void, !dbg [[DBG258:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG260:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG259:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META261:![0-9]+]], metadata !DIExpression()), !dbg [[DBG262:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META260:![0-9]+]], metadata !DIExpression()), !dbg [[DBG261:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG263:![0-9]+]]
-// SIMD4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG265:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG266:![0-9]+]]
+// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG262:![0-9]+]]
+// SIMD4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG264:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG265:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG267:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG266:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META268:![0-9]+]], metadata !DIExpression()), !dbg [[DBG269:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META267:![0-9]+]], metadata !DIExpression()), !dbg [[DBG268:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META270:![0-9]+]], metadata !DIExpression()), !dbg [[DBG271:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META269:![0-9]+]], metadata !DIExpression()), !dbg [[DBG270:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG272:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG273:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG272]]
-// SIMD4-NEXT: ret void, !dbg [[DBG274:![0-9]+]]
+// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG271:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG272:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG271]]
+// SIMD4-NEXT: ret void, !dbg [[DBG273:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG275:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG274:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META276:![0-9]+]], metadata !DIExpression()), !dbg [[DBG277:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META275:![0-9]+]], metadata !DIExpression()), !dbg [[DBG276:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG278:![0-9]+]]
-// SIMD4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG280:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG281:![0-9]+]]
+// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG277:![0-9]+]]
+// SIMD4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG279:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG280:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG282:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG281:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META283:![0-9]+]], metadata !DIExpression()), !dbg [[DBG284:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META282:![0-9]+]], metadata !DIExpression()), !dbg [[DBG283:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META285:![0-9]+]], metadata !DIExpression()), !dbg [[DBG286:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META284:![0-9]+]], metadata !DIExpression()), !dbg [[DBG285:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG287:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG288:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG287]]
-// SIMD4-NEXT: ret void, !dbg [[DBG289:![0-9]+]]
+// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG286:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG287:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG286]]
+// SIMD4-NEXT: ret void, !dbg [[DBG288:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG290:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG289:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META291:![0-9]+]], metadata !DIExpression()), !dbg [[DBG292:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META290:![0-9]+]], metadata !DIExpression()), !dbg [[DBG291:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG293:![0-9]+]]
-// SIMD4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG295:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG296:![0-9]+]]
+// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG292:![0-9]+]]
+// SIMD4-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG294:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG295:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG297:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG296:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META298:![0-9]+]], metadata !DIExpression()), !dbg [[DBG299:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META297:![0-9]+]], metadata !DIExpression()), !dbg [[DBG298:![0-9]+]]
// SIMD4-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META300:![0-9]+]], metadata !DIExpression()), !dbg [[DBG301:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG300:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG302:![0-9]+]]
-// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG303:![0-9]+]]
-// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG302]]
-// SIMD4-NEXT: ret void, !dbg [[DBG304:![0-9]+]]
+// SIMD4-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG301:![0-9]+]]
+// SIMD4-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG302:![0-9]+]]
+// SIMD4-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG301]]
+// SIMD4-NEXT: ret void, !dbg [[DBG303:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG305:![0-9]+]] {
+// SIMD4-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG304:![0-9]+]] {
// SIMD4-NEXT: entry:
// SIMD4-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// SIMD4-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META306:![0-9]+]], metadata !DIExpression()), !dbg [[DBG307:![0-9]+]]
+// SIMD4-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META305:![0-9]+]], metadata !DIExpression()), !dbg [[DBG306:![0-9]+]]
// SIMD4-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG308:![0-9]+]]
-// SIMD4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG310:![0-9]+]]
-// SIMD4-NEXT: ret void, !dbg [[DBG311:![0-9]+]]
+// SIMD4-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG307:![0-9]+]]
+// SIMD4-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG309:![0-9]+]]
+// SIMD4-NEXT: ret void, !dbg [[DBG310:![0-9]+]]
//
//
// SIMD4-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// SIMD4-SAME: () #[[ATTR0]] !dbg [[DBG312:![0-9]+]] {
+// SIMD4-SAME: () #[[ATTR0]] !dbg [[DBG311:![0-9]+]] {
// SIMD4-NEXT: entry:
-// SIMD4-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG314:![0-9]+]]
-// SIMD4-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG314]]
-// SIMD4-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG314]]
+// SIMD4-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG313:![0-9]+]]
+// SIMD4-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG313]]
+// SIMD4-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG313]]
// SIMD4-NEXT: ret void
//
//
@@ -6440,7 +6440,7 @@ int foobar() {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META118:![0-9]+]], metadata !DIExpression()), !dbg [[DBG120:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META118:![0-9]+]], metadata !DIExpression()), !dbg [[DBG120:![0-9]+]]
// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG121:![0-9]+]]
// DEBUG1-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]], i32 noundef 5), !dbg [[DBG122:![0-9]+]]
// DEBUG1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG121]]
@@ -6453,9 +6453,9 @@ int foobar() {
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG126:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META124:![0-9]+]], metadata !DIExpression()), !dbg [[DBG126:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META127:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG129:![0-9]+]]
// DEBUG1-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG129]]
@@ -6467,7 +6467,7 @@ int foobar() {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG133]]
// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]]) #[[ATTR4:[0-9]+]], !dbg [[DBG133]]
// DEBUG1-NEXT: ret void, !dbg [[DBG134:![0-9]+]]
@@ -6478,7 +6478,7 @@ int foobar() {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META136:![0-9]+]], metadata !DIExpression()), !dbg [[DBG137:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META136:![0-9]+]], metadata !DIExpression()), !dbg [[DBG137:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
// DEBUG1-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]], !dbg [[DBG138:![0-9]+]]
// DEBUG1-NEXT: ret void, !dbg [[DBG139:![0-9]+]]
@@ -6502,47 +6502,47 @@ int foobar() {
// DEBUG1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: [[ARRAYINIT_ENDOFINIT9:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG144:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG144:![0-9]+]]
// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG145:![0-9]+]]
// DEBUG1-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP1]], i64 0, i64 0, !dbg [[DBG146:![0-9]+]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_BEGIN]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG146]]
// DEBUG1-NEXT: [[ARRAYINIT_BEGIN1:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0, !dbg [[DBG147:![0-9]+]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_BEGIN1]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG147]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN1]], i32 noundef 1)
-// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG148:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG148:![0-9]+]]
// DEBUG1: invoke.cont:
// DEBUG1-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[ARRAYINIT_BEGIN1]], i64 1, !dbg [[DBG147]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_ELEMENT]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG147]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
-// DEBUG1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG149:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG149:![0-9]+]]
// DEBUG1: invoke.cont3:
// DEBUG1-NEXT: [[ARRAYINIT_ELEMENT4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT]], i64 1, !dbg [[DBG147]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_ELEMENT4]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG147]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT4]], i32 noundef 3)
-// DEBUG1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]], !dbg [[DBG150:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]], !dbg [[DBG150:![0-9]+]]
// DEBUG1: invoke.cont5:
// DEBUG1-NEXT: [[ARRAYINIT_ELEMENT7:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 1, !dbg [[DBG146]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_ELEMENT7]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG146]]
// DEBUG1-NEXT: [[ARRAYINIT_BEGIN8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_ELEMENT7]], i64 0, i64 0, !dbg [[DBG151:![0-9]+]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_BEGIN8]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG151]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN8]], i32 noundef 4)
-// DEBUG1-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]], !dbg [[DBG152:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]], !dbg [[DBG152:![0-9]+]]
// DEBUG1: invoke.cont11:
// DEBUG1-NEXT: [[ARRAYINIT_ELEMENT12:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_BEGIN8]], i64 1, !dbg [[DBG151]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_ELEMENT12]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG151]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT12]], i32 noundef 5)
-// DEBUG1-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]], !dbg [[DBG153:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]], !dbg [[DBG153:![0-9]+]]
// DEBUG1: invoke.cont13:
// DEBUG1-NEXT: [[ARRAYINIT_ELEMENT14:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT12]], i64 1, !dbg [[DBG151]]
// DEBUG1-NEXT: store ptr [[ARRAYINIT_ELEMENT14]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG151]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT14]], i32 noundef 6)
-// DEBUG1-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]], !dbg [[DBG154:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]], !dbg [[DBG154:![0-9]+]]
// DEBUG1: invoke.cont15:
// DEBUG1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG145]]
// DEBUG1-NEXT: ret ptr [[TMP2]], !dbg [[DBG145]]
// DEBUG1: lpad:
// DEBUG1-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 }
-// DEBUG1-NEXT: cleanup, !dbg [[DBG144]]
+// DEBUG1-NEXT: cleanup, !dbg [[DBG144]]
// DEBUG1-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0, !dbg [[DBG144]]
// DEBUG1-NEXT: store ptr [[TMP4]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG144]]
// DEBUG1-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1, !dbg [[DBG144]]
@@ -6560,7 +6560,7 @@ int foobar() {
// DEBUG1-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG147]]
// DEBUG1: lpad10:
// DEBUG1-NEXT: [[TMP7:%.*]] = landingpad { ptr, i32 }
-// DEBUG1-NEXT: cleanup, !dbg [[DBG144]]
+// DEBUG1-NEXT: cleanup, !dbg [[DBG144]]
// DEBUG1-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 0, !dbg [[DBG144]]
// DEBUG1-NEXT: store ptr [[TMP8]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG144]]
// DEBUG1-NEXT: [[TMP9:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 1, !dbg [[DBG144]]
@@ -6603,7 +6603,7 @@ int foobar() {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META156:![0-9]+]], metadata !DIExpression()), !dbg [[DBG157:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META156:![0-9]+]], metadata !DIExpression()), !dbg [[DBG157:![0-9]+]]
// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG157]]
// DEBUG1-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP1]], i64 6, !dbg [[DBG157]]
// DEBUG1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG157]]
@@ -6628,209 +6628,209 @@ int foobar() {
// DEBUG1-LABEL: define {{[^@]+}}@__cxx_global_var_init
// DEBUG1-SAME: () #[[ATTR0]] !dbg [[DBG161:![0-9]+]] {
// DEBUG1-NEXT: entry:
-// DEBUG1-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG165:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG167:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG168:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG164:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG166:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG167:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG169:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG168:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META170:![0-9]+]], metadata !DIExpression()), !dbg [[DBG171:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META169:![0-9]+]], metadata !DIExpression()), !dbg [[DBG170:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META172:![0-9]+]], metadata !DIExpression()), !dbg [[DBG173:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META171:![0-9]+]], metadata !DIExpression()), !dbg [[DBG172:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG174:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG175:![0-9]+]]
-// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG174]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG176:![0-9]+]]
+// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG173:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG174:![0-9]+]]
+// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG173]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG175:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG177:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG176:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META178:![0-9]+]], metadata !DIExpression()), !dbg [[DBG179:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META177:![0-9]+]], metadata !DIExpression()), !dbg [[DBG178:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG180:![0-9]+]]
-// DEBUG1-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG182:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG183:![0-9]+]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG179:![0-9]+]]
+// DEBUG1-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG181:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG182:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@__cxx_global_var_init.4
-// DEBUG1-SAME: () #[[ATTR0]] !dbg [[DBG184:![0-9]+]] {
+// DEBUG1-SAME: () #[[ATTR0]] !dbg [[DBG183:![0-9]+]] {
// DEBUG1-NEXT: entry:
-// DEBUG1-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG185:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG187:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG188:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG184:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG186:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG187:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG189:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG188:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META190:![0-9]+]], metadata !DIExpression()), !dbg [[DBG192:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META189:![0-9]+]], metadata !DIExpression()), !dbg [[DBG191:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META193:![0-9]+]], metadata !DIExpression()), !dbg [[DBG194:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META192:![0-9]+]], metadata !DIExpression()), !dbg [[DBG193:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG195:![0-9]+]]
-// DEBUG1-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG195]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG196:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG194:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG194]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG195:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG197:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG196:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META198:![0-9]+]], metadata !DIExpression()), !dbg [[DBG199:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META197:![0-9]+]], metadata !DIExpression()), !dbg [[DBG198:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR4]], !dbg [[DBG200:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG201:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR4]], !dbg [[DBG199:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG200:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG202:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG201:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META203:![0-9]+]], metadata !DIExpression()), !dbg [[DBG204:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META202:![0-9]+]], metadata !DIExpression()), !dbg [[DBG203:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META205:![0-9]+]], metadata !DIExpression()), !dbg [[DBG206:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META204:![0-9]+]], metadata !DIExpression()), !dbg [[DBG205:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG207:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG208:![0-9]+]]
-// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG207]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG209:![0-9]+]]
+// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG206:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG207:![0-9]+]]
+// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG206]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG208:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG210:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG209:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META211:![0-9]+]], metadata !DIExpression()), !dbg [[DBG212:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META210:![0-9]+]], metadata !DIExpression()), !dbg [[DBG211:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG213:![0-9]+]]
-// DEBUG1-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG215:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG216:![0-9]+]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG212:![0-9]+]]
+// DEBUG1-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG214:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG215:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@__cxx_global_var_init.5
-// DEBUG1-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG217:![0-9]+]] {
+// DEBUG1-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG216:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// DEBUG1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG218:![0-9]+]]
-// DEBUG1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG220:![0-9]+]]
+// DEBUG1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG217:![0-9]+]]
+// DEBUG1-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG219:![0-9]+]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG221:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG220:![0-9]+]]
// DEBUG1: invoke.cont:
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG220]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG219]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// DEBUG1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG222:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG221:![0-9]+]]
// DEBUG1: invoke.cont2:
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG220]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG219]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// DEBUG1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG223:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG222:![0-9]+]]
// DEBUG1: invoke.cont3:
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG218]]
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG224:![0-9]+]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG217]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG223:![0-9]+]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// DEBUG1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG225:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG224:![0-9]+]]
// DEBUG1: invoke.cont7:
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG224]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG223]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// DEBUG1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG226:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG225:![0-9]+]]
// DEBUG1: invoke.cont8:
-// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG224]]
+// DEBUG1-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG223]]
// DEBUG1-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// DEBUG1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG227:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG226:![0-9]+]]
// DEBUG1: invoke.cont9:
-// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG228:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG227:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG227]]
// DEBUG1: lpad:
// DEBUG1-NEXT: [[TMP1:%.*]] = landingpad { ptr, i32 }
-// DEBUG1-NEXT: cleanup, !dbg [[DBG229:![0-9]+]]
-// DEBUG1-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG229]]
-// DEBUG1-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG229]]
-// DEBUG1-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG229]]
-// DEBUG1-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG229]]
-// DEBUG1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG220]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG220]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG220]]
+// DEBUG1-NEXT: cleanup, !dbg [[DBG228:![0-9]+]]
+// DEBUG1-NEXT: [[TMP2:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 0, !dbg [[DBG228]]
+// DEBUG1-NEXT: store ptr [[TMP2]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP1]], 1, !dbg [[DBG228]]
+// DEBUG1-NEXT: store i32 [[TMP3]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP4:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG219]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP4]], !dbg [[DBG219]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG219]]
// DEBUG1: arraydestroy.body:
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG220]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG220]]
-// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG220]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG220]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG220]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP4]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG219]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG219]]
+// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG219]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG219]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG219]]
// DEBUG1: arraydestroy.done4:
-// DEBUG1-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG220]]
+// DEBUG1-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG219]]
// DEBUG1: lpad6:
// DEBUG1-NEXT: [[TMP5:%.*]] = landingpad { ptr, i32 }
-// DEBUG1-NEXT: cleanup, !dbg [[DBG229]]
-// DEBUG1-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG229]]
-// DEBUG1-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG229]]
-// DEBUG1-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG229]]
-// DEBUG1-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG229]]
-// DEBUG1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG224]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG224]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG224]]
+// DEBUG1-NEXT: cleanup, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP6:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 0, !dbg [[DBG228]]
+// DEBUG1-NEXT: store ptr [[TMP6]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP5]], 1, !dbg [[DBG228]]
+// DEBUG1-NEXT: store i32 [[TMP7]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG228]]
+// DEBUG1-NEXT: [[TMP8:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG223]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP8]], !dbg [[DBG223]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG223]]
// DEBUG1: arraydestroy.body11:
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG224]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG224]]
-// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR4]], !dbg [[DBG224]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG224]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG224]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP8]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG223]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG223]]
+// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR4]], !dbg [[DBG223]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG223]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG223]]
// DEBUG1: arraydestroy.done15:
-// DEBUG1-NEXT: br label [[EHCLEANUP]], !dbg [[DBG224]]
+// DEBUG1-NEXT: br label [[EHCLEANUP]], !dbg [[DBG223]]
// DEBUG1: ehcleanup:
-// DEBUG1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG218]]
-// DEBUG1-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG218]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG218]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG218]]
+// DEBUG1-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG217]]
+// DEBUG1-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP9]], i64 0, i64 0, !dbg [[DBG217]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG217]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG217]]
// DEBUG1: arraydestroy.body17:
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG218]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG218]]
-// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG218]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG218]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG218]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG217]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG217]]
+// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG217]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG217]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG217]]
// DEBUG1: arraydestroy.done21:
-// DEBUG1-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG218]]
+// DEBUG1-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG217]]
// DEBUG1: eh.resume:
-// DEBUG1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG218]]
-// DEBUG1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG218]]
-// DEBUG1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG218]]
-// DEBUG1-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG218]]
-// DEBUG1-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG218]]
+// DEBUG1-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG217]]
+// DEBUG1-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG217]]
+// DEBUG1-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG217]]
+// DEBUG1-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG217]]
+// DEBUG1-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG217]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG230:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG229:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META233:![0-9]+]], metadata !DIExpression()), !dbg [[DBG234:![0-9]+]]
-// DEBUG1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG234]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META232:![0-9]+]], metadata !DIExpression()), !dbg [[DBG233:![0-9]+]]
+// DEBUG1-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG233]]
// DEBUG1: arraydestroy.body:
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG234]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG234]]
-// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG234]]
-// DEBUG1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG234]]
-// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG234]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG233]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG233]]
+// DEBUG1-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG233]]
+// DEBUG1-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG233]]
+// DEBUG1-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG233]]
// DEBUG1: arraydestroy.done1:
-// DEBUG1-NEXT: ret void, !dbg [[DBG234]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG233]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@main
@@ -6840,9 +6840,9 @@ int foobar() {
// DEBUG1-NEXT: [[RES:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
-// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]])
+// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]]), !dbg [[DBG234:![0-9]+]]
// DEBUG1-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META235:![0-9]+]], metadata !DIExpression()), !dbg [[DBG236:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META235:![0-9]+]], metadata !DIExpression()), !dbg [[DBG236:![0-9]+]]
// DEBUG1-NEXT: [[TMP1:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG237:![0-9]+]]
// DEBUG1-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP1]], 0, !dbg [[DBG237]]
// DEBUG1-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG237]], !prof [[PROF238:![0-9]+]]
@@ -6853,76 +6853,76 @@ int foobar() {
// DEBUG1: init:
// DEBUG1-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG237]]
// DEBUG1-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB7]], ptr @_ZZ4mainE2sm, ptr @.__kmpc_global_ctor_..6, ptr null, ptr @.__kmpc_global_dtor_..7), !dbg [[DBG237]]
-// DEBUG1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB9]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG239:![0-9]+]]
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG240:![0-9]+]]
-// DEBUG1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG240]]
+// DEBUG1-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB9]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG234]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG239:![0-9]+]]
+// DEBUG1-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG239]]
// DEBUG1-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP5]])
-// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG241:![0-9]+]]
+// DEBUG1-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG240:![0-9]+]]
// DEBUG1: invoke.cont:
// DEBUG1-NEXT: [[TMP6:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG237]]
// DEBUG1-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR4]], !dbg [[DBG237]]
// DEBUG1-NEXT: br label [[INIT_END]], !dbg [[DBG237]]
// DEBUG1: init.end:
-// DEBUG1-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB11:[0-9]+]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG242:![0-9]+]]
-// DEBUG1-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG243:![0-9]+]]
-// DEBUG1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG243]]
-// DEBUG1-NEXT: store i32 [[TMP8]], ptr [[RES]], align 4, !dbg [[DBG244:![0-9]+]]
-// DEBUG1-NEXT: [[TMP9:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB13:[0-9]+]], i32 [[TMP0]], ptr @_ZZ4mainE2sm, i64 24, ptr @_ZZ4mainE2sm.cache.), !dbg [[DBG245:![0-9]+]]
-// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG246:![0-9]+]]
-// DEBUG1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG246]]
-// DEBUG1-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG247:![0-9]+]]
-// DEBUG1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG247]]
-// DEBUG1-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG247]]
-// DEBUG1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB15:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG248:![0-9]+]]
-// DEBUG1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP12]], i32 0, i32 0, !dbg [[DBG249:![0-9]+]]
-// DEBUG1-NEXT: [[TMP13:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG249]]
-// DEBUG1-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG250:![0-9]+]]
-// DEBUG1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG250]]
-// DEBUG1-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG250]]
-// DEBUG1-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG251:![0-9]+]]
-// DEBUG1-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG252:![0-9]+]]
-// DEBUG1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG252]]
-// DEBUG1-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG252]]
-// DEBUG1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB17:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG253:![0-9]+]]
-// DEBUG1-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP17]], i32 0, i32 0, !dbg [[DBG254:![0-9]+]]
-// DEBUG1-NEXT: [[TMP18:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG254]]
-// DEBUG1-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG255:![0-9]+]]
-// DEBUG1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG255]]
-// DEBUG1-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG255]]
-// DEBUG1-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB19:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG256:![0-9]+]]
-// DEBUG1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP20]], i64 0, i64 1, !dbg [[DBG256]]
-// DEBUG1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG256]]
-// DEBUG1-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG257:![0-9]+]]
-// DEBUG1-NEXT: [[TMP21:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG257]]
-// DEBUG1-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG258:![0-9]+]]
-// DEBUG1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG258]]
-// DEBUG1-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG258]]
-// DEBUG1-NEXT: [[TMP23:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB21:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG259:![0-9]+]]
-// DEBUG1-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !dbg [[DBG259]]
-// DEBUG1-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG260:![0-9]+]]
-// DEBUG1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP25]], [[TMP24]], !dbg [[DBG260]]
-// DEBUG1-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG260]]
-// DEBUG1-NEXT: [[TMP26:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB23:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG261:![0-9]+]]
-// DEBUG1-NEXT: [[TMP27:%.*]] = load float, ptr [[TMP26]], align 4, !dbg [[DBG261]]
-// DEBUG1-NEXT: [[CONV:%.*]] = fptosi float [[TMP27]] to i32, !dbg [[DBG261]]
-// DEBUG1-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG262:![0-9]+]]
-// DEBUG1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[CONV]], !dbg [[DBG262]]
-// DEBUG1-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG262]]
-// DEBUG1-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB25:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG263:![0-9]+]]
-// DEBUG1-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP29]], i32 0, i32 0, !dbg [[DBG264:![0-9]+]]
-// DEBUG1-NEXT: [[TMP30:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG264]]
-// DEBUG1-NEXT: [[TMP31:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG265:![0-9]+]]
-// DEBUG1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP31]], [[TMP30]], !dbg [[DBG265]]
-// DEBUG1-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG265]]
-// DEBUG1-NEXT: [[TMP32:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG266:![0-9]+]]
-// DEBUG1-NEXT: ret i32 [[TMP32]], !dbg [[DBG267:![0-9]+]]
+// DEBUG1-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB11:[0-9]+]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG241:![0-9]+]]
+// DEBUG1-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG242:![0-9]+]]
+// DEBUG1-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG242]]
+// DEBUG1-NEXT: store i32 [[TMP8]], ptr [[RES]], align 4, !dbg [[DBG243:![0-9]+]]
+// DEBUG1-NEXT: [[TMP9:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB13:[0-9]+]], i32 [[TMP0]], ptr @_ZZ4mainE2sm, i64 24, ptr @_ZZ4mainE2sm.cache.), !dbg [[DBG244:![0-9]+]]
+// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG245:![0-9]+]]
+// DEBUG1-NEXT: [[TMP10:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG245]]
+// DEBUG1-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG246:![0-9]+]]
+// DEBUG1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG246]]
+// DEBUG1-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG246]]
+// DEBUG1-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB15:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG247:![0-9]+]]
+// DEBUG1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP12]], i32 0, i32 0, !dbg [[DBG248:![0-9]+]]
+// DEBUG1-NEXT: [[TMP13:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG248]]
+// DEBUG1-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG249:![0-9]+]]
+// DEBUG1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG249]]
+// DEBUG1-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG249]]
+// DEBUG1-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG250:![0-9]+]]
+// DEBUG1-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG251:![0-9]+]]
+// DEBUG1-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG251]]
+// DEBUG1-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG251]]
+// DEBUG1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB17:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG252:![0-9]+]]
+// DEBUG1-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP17]], i32 0, i32 0, !dbg [[DBG253:![0-9]+]]
+// DEBUG1-NEXT: [[TMP18:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG253]]
+// DEBUG1-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG254:![0-9]+]]
+// DEBUG1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG254]]
+// DEBUG1-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG254]]
+// DEBUG1-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB19:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG255:![0-9]+]]
+// DEBUG1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP20]], i64 0, i64 1, !dbg [[DBG255]]
+// DEBUG1-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG255]]
+// DEBUG1-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG256:![0-9]+]]
+// DEBUG1-NEXT: [[TMP21:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG256]]
+// DEBUG1-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG257:![0-9]+]]
+// DEBUG1-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG257]]
+// DEBUG1-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG257]]
+// DEBUG1-NEXT: [[TMP23:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB21:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG258:![0-9]+]]
+// DEBUG1-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !dbg [[DBG258]]
+// DEBUG1-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG259:![0-9]+]]
+// DEBUG1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP25]], [[TMP24]], !dbg [[DBG259]]
+// DEBUG1-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG259]]
+// DEBUG1-NEXT: [[TMP26:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB23:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG260:![0-9]+]]
+// DEBUG1-NEXT: [[TMP27:%.*]] = load float, ptr [[TMP26]], align 4, !dbg [[DBG260]]
+// DEBUG1-NEXT: [[CONV:%.*]] = fptosi float [[TMP27]] to i32, !dbg [[DBG260]]
+// DEBUG1-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG261:![0-9]+]]
+// DEBUG1-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[CONV]], !dbg [[DBG261]]
+// DEBUG1-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG261]]
+// DEBUG1-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB25:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG262:![0-9]+]]
+// DEBUG1-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP29]], i32 0, i32 0, !dbg [[DBG263:![0-9]+]]
+// DEBUG1-NEXT: [[TMP30:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG263]]
+// DEBUG1-NEXT: [[TMP31:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG264:![0-9]+]]
+// DEBUG1-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP31]], [[TMP30]], !dbg [[DBG264]]
+// DEBUG1-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG264]]
+// DEBUG1-NEXT: [[TMP32:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG265:![0-9]+]]
+// DEBUG1-NEXT: ret i32 [[TMP32]], !dbg [[DBG266:![0-9]+]]
// DEBUG1: lpad:
// DEBUG1-NEXT: [[TMP33:%.*]] = landingpad { ptr, i32 }
-// DEBUG1-NEXT: cleanup, !dbg [[DBG268:![0-9]+]]
-// DEBUG1-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0, !dbg [[DBG268]]
-// DEBUG1-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG268]]
-// DEBUG1-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1, !dbg [[DBG268]]
-// DEBUG1-NEXT: store i32 [[TMP35]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG268]]
+// DEBUG1-NEXT: cleanup, !dbg [[DBG267:![0-9]+]]
+// DEBUG1-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0, !dbg [[DBG267]]
+// DEBUG1-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG267]]
+// DEBUG1-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1, !dbg [[DBG267]]
+// DEBUG1-NEXT: store i32 [[TMP35]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG267]]
// DEBUG1-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR4]], !dbg [[DBG237]]
// DEBUG1-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG237]]
// DEBUG1: eh.resume:
@@ -6934,436 +6934,436 @@ int foobar() {
//
//
// DEBUG1-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..6
-// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG269:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG268:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
-// DEBUG1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]])
+// DEBUG1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]]), !dbg [[DBG269:![0-9]+]]
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META270:![0-9]+]], metadata !DIExpression()), !dbg [[DBG271:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META270:![0-9]+]], metadata !DIExpression()), !dbg [[DBG271:![0-9]+]]
// DEBUG1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG272:![0-9]+]]
-// DEBUG1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB5]], i32 [[TMP1]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG273:![0-9]+]]
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG274:![0-9]+]]
-// DEBUG1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG274]]
-// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) [[TMP2]], i32 noundef [[TMP4]]), !dbg [[DBG275:![0-9]+]]
+// DEBUG1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB5]], i32 [[TMP1]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG269]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG273:![0-9]+]]
+// DEBUG1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG273]]
+// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) [[TMP2]], i32 noundef [[TMP4]]), !dbg [[DBG274:![0-9]+]]
// DEBUG1-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG272]]
// DEBUG1-NEXT: ret ptr [[TMP5]], !dbg [[DBG272]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC1Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG276:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG275:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META277:![0-9]+]], metadata !DIExpression()), !dbg [[DBG279:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META276:![0-9]+]], metadata !DIExpression()), !dbg [[DBG278:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META280:![0-9]+]], metadata !DIExpression()), !dbg [[DBG281:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META279:![0-9]+]], metadata !DIExpression()), !dbg [[DBG280:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG282:![0-9]+]]
-// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG282]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG283:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG281:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG281]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG282:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..7
-// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG284:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG283:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META285:![0-9]+]], metadata !DIExpression()), !dbg [[DBG286:![0-9]+]]
-// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG286]]
-// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[TMP1]]) #[[ATTR4]], !dbg [[DBG286]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG287:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META284:![0-9]+]], metadata !DIExpression()), !dbg [[DBG285:![0-9]+]]
+// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG285]]
+// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[TMP1]]) #[[ATTR4]], !dbg [[DBG285]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG286:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD1Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG288:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG287:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META289:![0-9]+]], metadata !DIExpression()), !dbg [[DBG290:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META288:![0-9]+]], metadata !DIExpression()), !dbg [[DBG289:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]], !dbg [[DBG291:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG292:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]], !dbg [[DBG290:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG291:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG293:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG292:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META294:![0-9]+]], metadata !DIExpression()), !dbg [[DBG295:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META293:![0-9]+]], metadata !DIExpression()), !dbg [[DBG294:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META296:![0-9]+]], metadata !DIExpression()), !dbg [[DBG297:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META295:![0-9]+]], metadata !DIExpression()), !dbg [[DBG296:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG298:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG299:![0-9]+]]
-// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG298]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG300:![0-9]+]]
+// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG297:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG298:![0-9]+]]
+// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG297]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG299:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG301:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG300:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META302:![0-9]+]], metadata !DIExpression()), !dbg [[DBG303:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META301:![0-9]+]], metadata !DIExpression()), !dbg [[DBG302:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG304:![0-9]+]]
-// DEBUG1-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG306:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG307:![0-9]+]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG303:![0-9]+]]
+// DEBUG1-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG305:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG306:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_Z6foobarv
-// DEBUG1-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG308:![0-9]+]] {
+// DEBUG1-SAME: () #[[ATTR3]] !dbg [[DBG307:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[RES:%.*]] = alloca i32, align 4
-// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB27:[0-9]+]])
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META309:![0-9]+]], metadata !DIExpression()), !dbg [[DBG310:![0-9]+]]
-// DEBUG1-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB27]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG311:![0-9]+]]
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG312:![0-9]+]]
-// DEBUG1-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG312]]
-// DEBUG1-NEXT: store i32 [[TMP2]], ptr [[RES]], align 4, !dbg [[DBG313:![0-9]+]]
-// DEBUG1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB29:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG314:![0-9]+]]
-// DEBUG1-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG315:![0-9]+]]
-// DEBUG1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG315]]
-// DEBUG1-NEXT: [[TMP5:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG316:![0-9]+]]
-// DEBUG1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP4]], !dbg [[DBG316]]
-// DEBUG1-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG316]]
-// DEBUG1-NEXT: [[TMP6:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG317:![0-9]+]]
-// DEBUG1-NEXT: [[TMP7:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG318:![0-9]+]]
-// DEBUG1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP7]], [[TMP6]], !dbg [[DBG318]]
-// DEBUG1-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG318]]
-// DEBUG1-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB31:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG319:![0-9]+]]
-// DEBUG1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP8]], i32 0, i32 0, !dbg [[DBG320:![0-9]+]]
-// DEBUG1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG320]]
-// DEBUG1-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG321:![0-9]+]]
-// DEBUG1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG321]]
-// DEBUG1-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG321]]
-// DEBUG1-NEXT: [[TMP11:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB33:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG322:![0-9]+]]
-// DEBUG1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP11]], i64 0, i64 1, !dbg [[DBG322]]
-// DEBUG1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG322]]
-// DEBUG1-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG323:![0-9]+]]
-// DEBUG1-NEXT: [[TMP12:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG323]]
-// DEBUG1-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG324:![0-9]+]]
-// DEBUG1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG324]]
-// DEBUG1-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG324]]
-// DEBUG1-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB35:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG325:![0-9]+]]
-// DEBUG1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG325]]
-// DEBUG1-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG326:![0-9]+]]
-// DEBUG1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG326]]
-// DEBUG1-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG326]]
-// DEBUG1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB37:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG327:![0-9]+]]
-// DEBUG1-NEXT: [[TMP18:%.*]] = load float, ptr [[TMP17]], align 4, !dbg [[DBG327]]
-// DEBUG1-NEXT: [[CONV:%.*]] = fptosi float [[TMP18]] to i32, !dbg [[DBG327]]
-// DEBUG1-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG328:![0-9]+]]
-// DEBUG1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], [[CONV]], !dbg [[DBG328]]
-// DEBUG1-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG328]]
-// DEBUG1-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB39:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG329:![0-9]+]]
-// DEBUG1-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP20]], i32 0, i32 0, !dbg [[DBG330:![0-9]+]]
-// DEBUG1-NEXT: [[TMP21:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG330]]
-// DEBUG1-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG331:![0-9]+]]
-// DEBUG1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG331]]
-// DEBUG1-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG331]]
-// DEBUG1-NEXT: [[TMP23:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG332:![0-9]+]]
-// DEBUG1-NEXT: ret i32 [[TMP23]], !dbg [[DBG333:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB27:[0-9]+]]), !dbg [[DBG308:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META309:![0-9]+]], metadata !DIExpression()), !dbg [[DBG310:![0-9]+]]
+// DEBUG1-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB27]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG308]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG311:![0-9]+]]
+// DEBUG1-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG311]]
+// DEBUG1-NEXT: store i32 [[TMP2]], ptr [[RES]], align 4, !dbg [[DBG312:![0-9]+]]
+// DEBUG1-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB29:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG313:![0-9]+]]
+// DEBUG1-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG314:![0-9]+]]
+// DEBUG1-NEXT: [[TMP4:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG314]]
+// DEBUG1-NEXT: [[TMP5:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG315:![0-9]+]]
+// DEBUG1-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP4]], !dbg [[DBG315]]
+// DEBUG1-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG315]]
+// DEBUG1-NEXT: [[TMP6:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG316:![0-9]+]]
+// DEBUG1-NEXT: [[TMP7:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG317:![0-9]+]]
+// DEBUG1-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP7]], [[TMP6]], !dbg [[DBG317]]
+// DEBUG1-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG317]]
+// DEBUG1-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB31:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG318:![0-9]+]]
+// DEBUG1-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP8]], i32 0, i32 0, !dbg [[DBG319:![0-9]+]]
+// DEBUG1-NEXT: [[TMP9:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG319]]
+// DEBUG1-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG320:![0-9]+]]
+// DEBUG1-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG320]]
+// DEBUG1-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG320]]
+// DEBUG1-NEXT: [[TMP11:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB33:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG321:![0-9]+]]
+// DEBUG1-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP11]], i64 0, i64 1, !dbg [[DBG321]]
+// DEBUG1-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG321]]
+// DEBUG1-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG322:![0-9]+]]
+// DEBUG1-NEXT: [[TMP12:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG322]]
+// DEBUG1-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG323:![0-9]+]]
+// DEBUG1-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG323]]
+// DEBUG1-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG323]]
+// DEBUG1-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB35:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG324:![0-9]+]]
+// DEBUG1-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG324]]
+// DEBUG1-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG325:![0-9]+]]
+// DEBUG1-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG325]]
+// DEBUG1-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG325]]
+// DEBUG1-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB37:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG326:![0-9]+]]
+// DEBUG1-NEXT: [[TMP18:%.*]] = load float, ptr [[TMP17]], align 4, !dbg [[DBG326]]
+// DEBUG1-NEXT: [[CONV:%.*]] = fptosi float [[TMP18]] to i32, !dbg [[DBG326]]
+// DEBUG1-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG327:![0-9]+]]
+// DEBUG1-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], [[CONV]], !dbg [[DBG327]]
+// DEBUG1-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG327]]
+// DEBUG1-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB39:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG328:![0-9]+]]
+// DEBUG1-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP20]], i32 0, i32 0, !dbg [[DBG329:![0-9]+]]
+// DEBUG1-NEXT: [[TMP21:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG329]]
+// DEBUG1-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG330:![0-9]+]]
+// DEBUG1-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG330]]
+// DEBUG1-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG330]]
+// DEBUG1-NEXT: [[TMP23:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG331:![0-9]+]]
+// DEBUG1-NEXT: ret i32 [[TMP23]], !dbg [[DBG332:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@__cxx_global_var_init.8
-// DEBUG1-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG334:![0-9]+]] {
+// DEBUG1-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG333:![0-9]+]] {
// DEBUG1-NEXT: entry:
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG335:![0-9]+]]
-// DEBUG1-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG335]]
-// DEBUG1-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG335]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG334:![0-9]+]]
+// DEBUG1-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG334]]
+// DEBUG1-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG334]]
// DEBUG1: init.check:
-// DEBUG1-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG335]]
-// DEBUG1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB41:[0-9]+]]), !dbg [[DBG335]]
-// DEBUG1-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB41]], ptr @_ZN2STI2S4E2stE, ptr @.__kmpc_global_ctor_..9, ptr null, ptr @.__kmpc_global_dtor_..10), !dbg [[DBG335]]
-// DEBUG1-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG336:![0-9]+]]
-// DEBUG1-NEXT: [[TMP2:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG335]]
-// DEBUG1-NEXT: br label [[INIT_END]], !dbg [[DBG335]]
+// DEBUG1-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG334]]
+// DEBUG1-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB41:[0-9]+]]), !dbg [[DBG334]]
+// DEBUG1-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB41]], ptr @_ZN2STI2S4E2stE, ptr @.__kmpc_global_ctor_..9, ptr null, ptr @.__kmpc_global_dtor_..10), !dbg [[DBG334]]
+// DEBUG1-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG335:![0-9]+]]
+// DEBUG1-NEXT: [[TMP2:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG334]]
+// DEBUG1-NEXT: br label [[INIT_END]], !dbg [[DBG334]]
// DEBUG1: init.end:
-// DEBUG1-NEXT: ret void, !dbg [[DBG338:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG337:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..9
-// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG339:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG338:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META340:![0-9]+]], metadata !DIExpression()), !dbg [[DBG341:![0-9]+]]
-// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG342:![0-9]+]]
-// DEBUG1-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]], i32 noundef 23), !dbg [[DBG343:![0-9]+]]
-// DEBUG1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG342]]
-// DEBUG1-NEXT: ret ptr [[TMP2]], !dbg [[DBG342]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META339:![0-9]+]], metadata !DIExpression()), !dbg [[DBG340:![0-9]+]]
+// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG341:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]], i32 noundef 23), !dbg [[DBG342:![0-9]+]]
+// DEBUG1-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG341]]
+// DEBUG1-NEXT: ret ptr [[TMP2]], !dbg [[DBG341]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG344:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG343:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META345:![0-9]+]], metadata !DIExpression()), !dbg [[DBG347:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META344:![0-9]+]], metadata !DIExpression()), !dbg [[DBG346:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META348:![0-9]+]], metadata !DIExpression()), !dbg [[DBG349:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META347:![0-9]+]], metadata !DIExpression()), !dbg [[DBG348:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG350:![0-9]+]]
-// DEBUG1-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG350]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG351:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG349:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG349]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG350:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..10
-// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG352:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG351:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META353:![0-9]+]], metadata !DIExpression()), !dbg [[DBG354:![0-9]+]]
-// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG354]]
-// DEBUG1-NEXT: call void @_ZN2S4D1Ev(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]]) #[[ATTR4]], !dbg [[DBG354]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG355:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META352:![0-9]+]], metadata !DIExpression()), !dbg [[DBG353:![0-9]+]]
+// DEBUG1-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG353]]
+// DEBUG1-NEXT: call void @_ZN2S4D1Ev(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]]) #[[ATTR4]], !dbg [[DBG353]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG354:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG356:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG355:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META357:![0-9]+]], metadata !DIExpression()), !dbg [[DBG358:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META356:![0-9]+]], metadata !DIExpression()), !dbg [[DBG357:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]], !dbg [[DBG359:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG360:![0-9]+]]
+// DEBUG1-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]], !dbg [[DBG358:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG359:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG361:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG360:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META362:![0-9]+]], metadata !DIExpression()), !dbg [[DBG363:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META361:![0-9]+]], metadata !DIExpression()), !dbg [[DBG362:![0-9]+]]
// DEBUG1-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META364:![0-9]+]], metadata !DIExpression()), !dbg [[DBG365:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META363:![0-9]+]], metadata !DIExpression()), !dbg [[DBG364:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG366:![0-9]+]]
-// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG367:![0-9]+]]
-// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG366]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG368:![0-9]+]]
+// DEBUG1-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG365:![0-9]+]]
+// DEBUG1-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG366:![0-9]+]]
+// DEBUG1-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG365]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG367:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG369:![0-9]+]] {
+// DEBUG1-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG368:![0-9]+]] {
// DEBUG1-NEXT: entry:
// DEBUG1-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG1-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META370:![0-9]+]], metadata !DIExpression()), !dbg [[DBG371:![0-9]+]]
+// DEBUG1-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META369:![0-9]+]], metadata !DIExpression()), !dbg [[DBG370:![0-9]+]]
// DEBUG1-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG372:![0-9]+]]
-// DEBUG1-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG374:![0-9]+]]
-// DEBUG1-NEXT: ret void, !dbg [[DBG375:![0-9]+]]
+// DEBUG1-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG371:![0-9]+]]
+// DEBUG1-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG373:![0-9]+]]
+// DEBUG1-NEXT: ret void, !dbg [[DBG374:![0-9]+]]
//
//
// DEBUG1-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// DEBUG1-SAME: () #[[ATTR0]] !dbg [[DBG376:![0-9]+]] {
+// DEBUG1-SAME: () #[[ATTR0]] !dbg [[DBG375:![0-9]+]] {
// DEBUG1-NEXT: entry:
-// DEBUG1-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG377:![0-9]+]]
-// DEBUG1-NEXT: call void @.__omp_threadprivate_init_.(), !dbg [[DBG377]]
-// DEBUG1-NEXT: call void @__cxx_global_var_init.4(), !dbg [[DBG377]]
-// DEBUG1-NEXT: call void @__cxx_global_var_init.5(), !dbg [[DBG377]]
-// DEBUG1-NEXT: call void @.__omp_threadprivate_init_..3(), !dbg [[DBG377]]
+// DEBUG1-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG376:![0-9]+]]
+// DEBUG1-NEXT: call void @.__omp_threadprivate_init_.(), !dbg [[DBG376]]
+// DEBUG1-NEXT: call void @__cxx_global_var_init.4(), !dbg [[DBG376]]
+// DEBUG1-NEXT: call void @__cxx_global_var_init.5(), !dbg [[DBG376]]
+// DEBUG1-NEXT: call void @.__omp_threadprivate_init_..3(), !dbg [[DBG376]]
// DEBUG1-NEXT: ret void
//
//
// DEBUG2-LABEL: define {{[^@]+}}@__cxx_global_var_init
// DEBUG2-SAME: () #[[ATTR0:[0-9]+]] !dbg [[DBG116:![0-9]+]] {
// DEBUG2-NEXT: entry:
-// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]), !dbg [[DBG120:![0-9]+]]
-// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB1]], ptr @_ZL3gs1, ptr @.__kmpc_global_ctor_., ptr null, ptr @.__kmpc_global_dtor_.), !dbg [[DBG120]]
-// DEBUG2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG121:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR4:[0-9]+]], !dbg [[DBG120]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG123:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1:[0-9]+]]), !dbg [[DBG119:![0-9]+]]
+// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB1]], ptr @_ZL3gs1, ptr @.__kmpc_global_ctor_., ptr null, ptr @.__kmpc_global_dtor_.), !dbg [[DBG119]]
+// DEBUG2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @_ZL3gs1, i32 noundef 5), !dbg [[DBG120:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S1D1Ev, ptr @_ZL3gs1, ptr @__dso_handle) #[[ATTR4:[0-9]+]], !dbg [[DBG119]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG122:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_.
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG124:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG123:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META126:![0-9]+]], metadata !DIExpression()), !dbg [[DBG128:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG129:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]], i32 noundef 5), !dbg [[DBG130:![0-9]+]]
-// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG129]]
-// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG129]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META125:![0-9]+]], metadata !DIExpression()), !dbg [[DBG127:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG128:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]], i32 noundef 5), !dbg [[DBG129:![0-9]+]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG128]]
+// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG128]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S1C1Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG131:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2:[0-9]+]] comdat align 2 !dbg [[DBG130:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META132:![0-9]+]], metadata !DIExpression()), !dbg [[DBG134:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META131:![0-9]+]], metadata !DIExpression()), !dbg [[DBG133:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META135:![0-9]+]], metadata !DIExpression()), !dbg [[DBG136:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META134:![0-9]+]], metadata !DIExpression()), !dbg [[DBG135:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG137:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG137]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG138:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG136:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S1C2Ei(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG136]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG137:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_.
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG139:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG138:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META140:![0-9]+]], metadata !DIExpression()), !dbg [[DBG141:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG141]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]]) #[[ATTR4]], !dbg [[DBG141]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG142:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META139:![0-9]+]], metadata !DIExpression()), !dbg [[DBG140:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG140]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[TMP1]]) #[[ATTR4]], !dbg [[DBG140]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG141:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S1D1Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3:[0-9]+]] comdat align 2 !dbg [[DBG143:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3:[0-9]+]] comdat align 2 !dbg [[DBG142:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META144:![0-9]+]], metadata !DIExpression()), !dbg [[DBG145:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META143:![0-9]+]], metadata !DIExpression()), !dbg [[DBG144:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]], !dbg [[DBG146:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG147:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S1D2Ev(ptr noundef nonnull align 4 dereferenceable(4) [[THIS1]]) #[[ATTR4]], !dbg [[DBG145:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG146:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@__cxx_global_var_init.1
-// DEBUG2-SAME: () #[[ATTR0]] !dbg [[DBG148:![0-9]+]] {
+// DEBUG2-SAME: () #[[ATTR0]] !dbg [[DBG147:![0-9]+]] {
// DEBUG2-NEXT: entry:
-// DEBUG2-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG149:![0-9]+]]
-// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG151:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG152:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S2C1Ei(ptr noundef nonnull align 8 dereferenceable(16) @_ZL3gs2, i32 noundef 27), !dbg [[DBG148:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S2D1Ev, ptr @_ZL3gs2, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG150:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG151:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S2C1Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG153:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG152:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META154:![0-9]+]], metadata !DIExpression()), !dbg [[DBG156:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META153:![0-9]+]], metadata !DIExpression()), !dbg [[DBG155:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META157:![0-9]+]], metadata !DIExpression()), !dbg [[DBG158:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META156:![0-9]+]], metadata !DIExpression()), !dbg [[DBG157:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG159:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG159]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG160:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG158:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S2C2Ei(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG158]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG159:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S2D1Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG161:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG160:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META162:![0-9]+]], metadata !DIExpression()), !dbg [[DBG163:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META161:![0-9]+]], metadata !DIExpression()), !dbg [[DBG162:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR4]], !dbg [[DBG164:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG165:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S2D2Ev(ptr noundef nonnull align 8 dereferenceable(16) [[THIS1]]) #[[ATTR4]], !dbg [[DBG163:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG164:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@__cxx_global_var_init.2
-// DEBUG2-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG166:![0-9]+]] {
+// DEBUG2-SAME: () #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG165:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[ARRAYINIT_ENDOFINIT1:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: [[ARRAYINIT_ENDOFINIT5:%.*]] = alloca ptr, align 8
-// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]]), !dbg [[DBG167:![0-9]+]]
-// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB3]], ptr @arr_x, ptr @.__kmpc_global_ctor_..3, ptr null, ptr @.__kmpc_global_dtor_..4), !dbg [[DBG167]]
-// DEBUG2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG168:![0-9]+]]
-// DEBUG2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG170:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB3:[0-9]+]]), !dbg [[DBG166:![0-9]+]]
+// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB3]], ptr @arr_x, ptr @.__kmpc_global_ctor_..3, ptr null, ptr @.__kmpc_global_dtor_..4), !dbg [[DBG166]]
+// DEBUG2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG167:![0-9]+]]
+// DEBUG2-NEXT: store ptr @arr_x, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG169:![0-9]+]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) @arr_x, i32 noundef 1)
-// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG171:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG170:![0-9]+]]
// DEBUG2: invoke.cont:
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG170]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG169]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 1), i32 noundef 2)
-// DEBUG2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG172:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT2:%.*]] unwind label [[LPAD]], !dbg [[DBG171:![0-9]+]]
// DEBUG2: invoke.cont2:
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG170]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG169]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr @arr_x, i64 2), i32 noundef 3)
-// DEBUG2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG173:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG172:![0-9]+]]
// DEBUG2: invoke.cont3:
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG168]]
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG174:![0-9]+]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG167]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG173:![0-9]+]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i32 noundef 4)
-// DEBUG2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG175:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT7:%.*]] unwind label [[LPAD6:%.*]], !dbg [[DBG174:![0-9]+]]
// DEBUG2: invoke.cont7:
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG174]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG173]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 1), i32 noundef 5)
-// DEBUG2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG176:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT8:%.*]] unwind label [[LPAD6]], !dbg [[DBG175:![0-9]+]]
// DEBUG2: invoke.cont8:
-// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG174]]
+// DEBUG2-NEXT: store ptr getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG173]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) getelementptr inbounds ([[STRUCT_S1]], ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), i64 2), i32 noundef 6)
-// DEBUG2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG177:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT9:%.*]] unwind label [[LPAD6]], !dbg [[DBG176:![0-9]+]]
// DEBUG2: invoke.cont9:
-// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG167]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__cxa_atexit(ptr @__cxx_global_array_dtor, ptr null, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG166]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG166]]
// DEBUG2: lpad:
// DEBUG2-NEXT: [[TMP2:%.*]] = landingpad { ptr, i32 }
-// DEBUG2-NEXT: cleanup, !dbg [[DBG178:![0-9]+]]
-// DEBUG2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP2]], 0, !dbg [[DBG178]]
-// DEBUG2-NEXT: store ptr [[TMP3]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG178]]
-// DEBUG2-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP2]], 1, !dbg [[DBG178]]
-// DEBUG2-NEXT: store i32 [[TMP4]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG178]]
-// DEBUG2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG170]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP5]], !dbg [[DBG170]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG170]]
+// DEBUG2-NEXT: cleanup, !dbg [[DBG177:![0-9]+]]
+// DEBUG2-NEXT: [[TMP3:%.*]] = extractvalue { ptr, i32 } [[TMP2]], 0, !dbg [[DBG177]]
+// DEBUG2-NEXT: store ptr [[TMP3]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG177]]
+// DEBUG2-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP2]], 1, !dbg [[DBG177]]
+// DEBUG2-NEXT: store i32 [[TMP4]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG177]]
+// DEBUG2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT1]], align 8, !dbg [[DBG169]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr @arr_x, [[TMP5]], !dbg [[DBG169]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE4:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG169]]
// DEBUG2: arraydestroy.body:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG170]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG170]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG170]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG170]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG170]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP5]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG169]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG169]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG169]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG169]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE4]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG169]]
// DEBUG2: arraydestroy.done4:
-// DEBUG2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG170]]
+// DEBUG2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG169]]
// DEBUG2: lpad6:
// DEBUG2-NEXT: [[TMP6:%.*]] = landingpad { ptr, i32 }
-// DEBUG2-NEXT: cleanup, !dbg [[DBG178]]
-// DEBUG2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 0, !dbg [[DBG178]]
-// DEBUG2-NEXT: store ptr [[TMP7]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG178]]
-// DEBUG2-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 1, !dbg [[DBG178]]
-// DEBUG2-NEXT: store i32 [[TMP8]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG178]]
-// DEBUG2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG174]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP9]], !dbg [[DBG174]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG174]]
+// DEBUG2-NEXT: cleanup, !dbg [[DBG177]]
+// DEBUG2-NEXT: [[TMP7:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 0, !dbg [[DBG177]]
+// DEBUG2-NEXT: store ptr [[TMP7]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG177]]
+// DEBUG2-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP6]], 1, !dbg [[DBG177]]
+// DEBUG2-NEXT: store i32 [[TMP8]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG177]]
+// DEBUG2-NEXT: [[TMP9:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT5]], align 8, !dbg [[DBG173]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY10:%.*]] = icmp eq ptr getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), [[TMP9]], !dbg [[DBG173]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY10]], label [[ARRAYDESTROY_DONE15:%.*]], label [[ARRAYDESTROY_BODY11:%.*]], !dbg [[DBG173]]
// DEBUG2: arraydestroy.body11:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP9]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG174]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG174]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR4]], !dbg [[DBG174]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG174]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG174]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST12:%.*]] = phi ptr [ [[TMP9]], [[LPAD6]] ], [ [[ARRAYDESTROY_ELEMENT13:%.*]], [[ARRAYDESTROY_BODY11]] ], !dbg [[DBG173]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT13]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST12]], i64 -1, !dbg [[DBG173]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT13]]) #[[ATTR4]], !dbg [[DBG173]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE14:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT13]], getelementptr inbounds ([3 x %struct.S1], ptr @arr_x, i64 1), !dbg [[DBG173]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE14]], label [[ARRAYDESTROY_DONE15]], label [[ARRAYDESTROY_BODY11]], !dbg [[DBG173]]
// DEBUG2: arraydestroy.done15:
-// DEBUG2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG174]]
+// DEBUG2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG173]]
// DEBUG2: ehcleanup:
-// DEBUG2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG168]]
-// DEBUG2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP10]], i64 0, i64 0, !dbg [[DBG168]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG168]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG168]]
+// DEBUG2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP10]], i64 0, i64 0, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr @arr_x, [[PAD_ARRAYEND]], !dbg [[DBG167]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG167]]
// DEBUG2: arraydestroy.body17:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG168]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG168]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG168]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG168]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG168]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG167]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG167]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG167]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], @arr_x, !dbg [[DBG167]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG167]]
// DEBUG2: arraydestroy.done21:
-// DEBUG2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG168]]
+// DEBUG2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG167]]
// DEBUG2: eh.resume:
-// DEBUG2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG168]]
-// DEBUG2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG168]]
-// DEBUG2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG168]]
-// DEBUG2-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG168]]
-// DEBUG2-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG168]]
+// DEBUG2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG167]]
+// DEBUG2-NEXT: [[LPAD_VAL22:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG167]]
+// DEBUG2-NEXT: resume { ptr, i32 } [[LPAD_VAL22]], !dbg [[DBG167]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..3
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG179:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] personality ptr @__gxx_personality_v0 !dbg [[DBG178:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[ARRAYINIT_ENDOFINIT:%.*]] = alloca ptr, align 8
@@ -7372,136 +7372,136 @@ int foobar() {
// DEBUG2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: [[ARRAYINIT_ENDOFINIT9:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META180:![0-9]+]], metadata !DIExpression()), !dbg [[DBG181:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG182:![0-9]+]]
-// DEBUG2-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP1]], i64 0, i64 0, !dbg [[DBG183:![0-9]+]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[ARRAYINIT_BEGIN1:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0, !dbg [[DBG184:![0-9]+]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN1]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG184]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META179:![0-9]+]], metadata !DIExpression()), !dbg [[DBG180:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG181:![0-9]+]]
+// DEBUG2-NEXT: [[ARRAYINIT_BEGIN:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP1]], i64 0, i64 0, !dbg [[DBG182:![0-9]+]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[ARRAYINIT_BEGIN1:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0, !dbg [[DBG183:![0-9]+]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN1]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG183]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN1]], i32 noundef 1)
-// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG185:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG184:![0-9]+]]
// DEBUG2: invoke.cont:
-// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[ARRAYINIT_BEGIN1]], i64 1, !dbg [[DBG184]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG184]]
+// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[ARRAYINIT_BEGIN1]], i64 1, !dbg [[DBG183]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG183]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT]], i32 noundef 2)
-// DEBUG2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG186:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT3:%.*]] unwind label [[LPAD]], !dbg [[DBG185:![0-9]+]]
// DEBUG2: invoke.cont3:
-// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT]], i64 1, !dbg [[DBG184]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT4]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG184]]
+// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT4:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT]], i64 1, !dbg [[DBG183]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT4]], ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG183]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT4]], i32 noundef 3)
-// DEBUG2-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]], !dbg [[DBG187:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT5:%.*]] unwind label [[LPAD]], !dbg [[DBG186:![0-9]+]]
// DEBUG2: invoke.cont5:
-// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT7:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 1, !dbg [[DBG183]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT7]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[ARRAYINIT_BEGIN8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_ELEMENT7]], i64 0, i64 0, !dbg [[DBG188:![0-9]+]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN8]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG188]]
+// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT7:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 1, !dbg [[DBG182]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT7]], ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[ARRAYINIT_BEGIN8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_ELEMENT7]], i64 0, i64 0, !dbg [[DBG187:![0-9]+]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_BEGIN8]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG187]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_BEGIN8]], i32 noundef 4)
-// DEBUG2-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]], !dbg [[DBG189:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT11:%.*]] unwind label [[LPAD10:%.*]], !dbg [[DBG188:![0-9]+]]
// DEBUG2: invoke.cont11:
-// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT12:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_BEGIN8]], i64 1, !dbg [[DBG188]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT12]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG188]]
+// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT12:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_BEGIN8]], i64 1, !dbg [[DBG187]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT12]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG187]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT12]], i32 noundef 5)
-// DEBUG2-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]], !dbg [[DBG190:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT13:%.*]] unwind label [[LPAD10]], !dbg [[DBG189:![0-9]+]]
// DEBUG2: invoke.cont13:
-// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT14:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT12]], i64 1, !dbg [[DBG188]]
-// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT14]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG188]]
+// DEBUG2-NEXT: [[ARRAYINIT_ELEMENT14:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYINIT_ELEMENT12]], i64 1, !dbg [[DBG187]]
+// DEBUG2-NEXT: store ptr [[ARRAYINIT_ELEMENT14]], ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG187]]
// DEBUG2-NEXT: invoke void @_ZN2S1C1Ei(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYINIT_ELEMENT14]], i32 noundef 6)
-// DEBUG2-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]], !dbg [[DBG191:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT15:%.*]] unwind label [[LPAD10]], !dbg [[DBG190:![0-9]+]]
// DEBUG2: invoke.cont15:
-// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG182]]
-// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG182]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG181]]
+// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG181]]
// DEBUG2: lpad:
// DEBUG2-NEXT: [[TMP3:%.*]] = landingpad { ptr, i32 }
-// DEBUG2-NEXT: cleanup, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0, !dbg [[DBG181]]
-// DEBUG2-NEXT: store ptr [[TMP4]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1, !dbg [[DBG181]]
-// DEBUG2-NEXT: store i32 [[TMP5]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG184]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYINIT_BEGIN1]], [[TMP6]], !dbg [[DBG184]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE6:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG184]]
+// DEBUG2-NEXT: cleanup, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP4:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 0, !dbg [[DBG180]]
+// DEBUG2-NEXT: store ptr [[TMP4]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP5:%.*]] = extractvalue { ptr, i32 } [[TMP3]], 1, !dbg [[DBG180]]
+// DEBUG2-NEXT: store i32 [[TMP5]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP6:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT2]], align 8, !dbg [[DBG183]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY:%.*]] = icmp eq ptr [[ARRAYINIT_BEGIN1]], [[TMP6]], !dbg [[DBG183]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY]], label [[ARRAYDESTROY_DONE6:%.*]], label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG183]]
// DEBUG2: arraydestroy.body:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP6]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG184]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG184]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG184]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAYINIT_BEGIN1]], !dbg [[DBG184]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE6]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG184]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP6]], [[LPAD]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG183]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG183]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG183]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[ARRAYINIT_BEGIN1]], !dbg [[DBG183]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE6]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG183]]
// DEBUG2: arraydestroy.done6:
-// DEBUG2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG184]]
+// DEBUG2-NEXT: br label [[EHCLEANUP:%.*]], !dbg [[DBG183]]
// DEBUG2: lpad10:
// DEBUG2-NEXT: [[TMP7:%.*]] = landingpad { ptr, i32 }
-// DEBUG2-NEXT: cleanup, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 0, !dbg [[DBG181]]
-// DEBUG2-NEXT: store ptr [[TMP8]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP9:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 1, !dbg [[DBG181]]
-// DEBUG2-NEXT: store i32 [[TMP9]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG181]]
-// DEBUG2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG188]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr [[ARRAYINIT_BEGIN8]], [[TMP10]], !dbg [[DBG188]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG188]]
+// DEBUG2-NEXT: cleanup, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP8:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 0, !dbg [[DBG180]]
+// DEBUG2-NEXT: store ptr [[TMP8]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP9:%.*]] = extractvalue { ptr, i32 } [[TMP7]], 1, !dbg [[DBG180]]
+// DEBUG2-NEXT: store i32 [[TMP9]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG180]]
+// DEBUG2-NEXT: [[TMP10:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT9]], align 8, !dbg [[DBG187]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY16:%.*]] = icmp eq ptr [[ARRAYINIT_BEGIN8]], [[TMP10]], !dbg [[DBG187]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY16]], label [[ARRAYDESTROY_DONE21:%.*]], label [[ARRAYDESTROY_BODY17:%.*]], !dbg [[DBG187]]
// DEBUG2: arraydestroy.body17:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[TMP10]], [[LPAD10]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG188]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG188]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG188]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], [[ARRAYINIT_BEGIN8]], !dbg [[DBG188]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG188]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST18:%.*]] = phi ptr [ [[TMP10]], [[LPAD10]] ], [ [[ARRAYDESTROY_ELEMENT19:%.*]], [[ARRAYDESTROY_BODY17]] ], !dbg [[DBG187]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT19]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST18]], i64 -1, !dbg [[DBG187]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT19]]) #[[ATTR4]], !dbg [[DBG187]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE20:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT19]], [[ARRAYINIT_BEGIN8]], !dbg [[DBG187]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE20]], label [[ARRAYDESTROY_DONE21]], label [[ARRAYDESTROY_BODY17]], !dbg [[DBG187]]
// DEBUG2: arraydestroy.done21:
-// DEBUG2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG188]]
+// DEBUG2-NEXT: br label [[EHCLEANUP]], !dbg [[DBG187]]
// DEBUG2: ehcleanup:
-// DEBUG2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[PAD_ARRAYBEGIN:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP11]], i64 0, i64 0, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY22:%.*]] = icmp eq ptr [[PAD_ARRAYBEGIN]], [[PAD_ARRAYEND]], !dbg [[DBG183]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY22]], label [[ARRAYDESTROY_DONE27:%.*]], label [[ARRAYDESTROY_BODY23:%.*]], !dbg [[DBG183]]
+// DEBUG2-NEXT: [[TMP11:%.*]] = load ptr, ptr [[ARRAYINIT_ENDOFINIT]], align 8, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[PAD_ARRAYBEGIN:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYINIT_BEGIN]], i64 0, i64 0, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[PAD_ARRAYEND:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[TMP11]], i64 0, i64 0, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ISEMPTY22:%.*]] = icmp eq ptr [[PAD_ARRAYBEGIN]], [[PAD_ARRAYEND]], !dbg [[DBG182]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_ISEMPTY22]], label [[ARRAYDESTROY_DONE27:%.*]], label [[ARRAYDESTROY_BODY23:%.*]], !dbg [[DBG182]]
// DEBUG2: arraydestroy.body23:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST24:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT25:%.*]], [[ARRAYDESTROY_BODY23]] ], !dbg [[DBG183]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT25]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST24]], i64 -1, !dbg [[DBG183]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT25]]) #[[ATTR4]], !dbg [[DBG183]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE26:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT25]], [[PAD_ARRAYBEGIN]], !dbg [[DBG183]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE26]], label [[ARRAYDESTROY_DONE27]], label [[ARRAYDESTROY_BODY23]], !dbg [[DBG183]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST24:%.*]] = phi ptr [ [[PAD_ARRAYEND]], [[EHCLEANUP]] ], [ [[ARRAYDESTROY_ELEMENT25:%.*]], [[ARRAYDESTROY_BODY23]] ], !dbg [[DBG182]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT25]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST24]], i64 -1, !dbg [[DBG182]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT25]]) #[[ATTR4]], !dbg [[DBG182]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE26:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT25]], [[PAD_ARRAYBEGIN]], !dbg [[DBG182]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE26]], label [[ARRAYDESTROY_DONE27]], label [[ARRAYDESTROY_BODY23]], !dbg [[DBG182]]
// DEBUG2: arraydestroy.done27:
-// DEBUG2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG183]]
+// DEBUG2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG182]]
// DEBUG2: eh.resume:
-// DEBUG2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG183]]
-// DEBUG2-NEXT: [[LPAD_VAL28:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG183]]
-// DEBUG2-NEXT: resume { ptr, i32 } [[LPAD_VAL28]], !dbg [[DBG183]]
+// DEBUG2-NEXT: [[EXN:%.*]] = load ptr, ptr [[EXN_SLOT]], align 8, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[SEL:%.*]] = load i32, ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[LPAD_VAL:%.*]] = insertvalue { ptr, i32 } poison, ptr [[EXN]], 0, !dbg [[DBG182]]
+// DEBUG2-NEXT: [[LPAD_VAL28:%.*]] = insertvalue { ptr, i32 } [[LPAD_VAL]], i32 [[SEL]], 1, !dbg [[DBG182]]
+// DEBUG2-NEXT: resume { ptr, i32 } [[LPAD_VAL28]], !dbg [[DBG182]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..4
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG192:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG191:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META193:![0-9]+]], metadata !DIExpression()), !dbg [[DBG194:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG194]]
-// DEBUG2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP1]], i64 6, !dbg [[DBG194]]
-// DEBUG2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG194]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META192:![0-9]+]], metadata !DIExpression()), !dbg [[DBG193:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG193]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP1]], i64 6, !dbg [[DBG193]]
+// DEBUG2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG193]]
// DEBUG2: arraydestroy.body:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG194]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG194]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG194]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[TMP1]], !dbg [[DBG194]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG194]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ [[TMP2]], [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG193]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG193]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG193]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], [[TMP1]], !dbg [[DBG193]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG193]]
// DEBUG2: arraydestroy.done1:
-// DEBUG2-NEXT: ret void, !dbg [[DBG195:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG194:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@__cxx_global_array_dtor
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG196:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG195:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META199:![0-9]+]], metadata !DIExpression()), !dbg [[DBG200:![0-9]+]]
-// DEBUG2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG200]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META198:![0-9]+]], metadata !DIExpression()), !dbg [[DBG199:![0-9]+]]
+// DEBUG2-NEXT: br label [[ARRAYDESTROY_BODY:%.*]], !dbg [[DBG199]]
// DEBUG2: arraydestroy.body:
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG200]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG200]]
-// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG200]]
-// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG200]]
-// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG200]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENTPAST:%.*]] = phi ptr [ getelementptr inbounds ([[STRUCT_S1:%.*]], ptr @arr_x, i64 6), [[ENTRY:%.*]] ], [ [[ARRAYDESTROY_ELEMENT:%.*]], [[ARRAYDESTROY_BODY]] ], !dbg [[DBG199]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_ELEMENT]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYDESTROY_ELEMENTPAST]], i64 -1, !dbg [[DBG199]]
+// DEBUG2-NEXT: call void @_ZN2S1D1Ev(ptr noundef nonnull align 4 dereferenceable(4) [[ARRAYDESTROY_ELEMENT]]) #[[ATTR4]], !dbg [[DBG199]]
+// DEBUG2-NEXT: [[ARRAYDESTROY_DONE:%.*]] = icmp eq ptr [[ARRAYDESTROY_ELEMENT]], @arr_x, !dbg [[DBG199]]
+// DEBUG2-NEXT: br i1 [[ARRAYDESTROY_DONE]], label [[ARRAYDESTROY_DONE1:%.*]], label [[ARRAYDESTROY_BODY]], !dbg [[DBG199]]
// DEBUG2: arraydestroy.done1:
-// DEBUG2-NEXT: ret void, !dbg [[DBG200]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG199]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@main
@@ -7511,9 +7511,9 @@ int foobar() {
// DEBUG2-NEXT: [[RES:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: [[EXN_SLOT:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[EHSELECTOR_SLOT:%.*]] = alloca i32, align 4
-// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]])
+// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB9:[0-9]+]]), !dbg [[DBG200:![0-9]+]]
// DEBUG2-NEXT: store i32 0, ptr [[RETVAL]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG202:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META201:![0-9]+]], metadata !DIExpression()), !dbg [[DBG202:![0-9]+]]
// DEBUG2-NEXT: [[TMP1:%.*]] = load atomic i8, ptr @_ZGVZ4mainE2sm acquire, align 8, !dbg [[DBG203:![0-9]+]]
// DEBUG2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP1]], 0, !dbg [[DBG203]]
// DEBUG2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG203]], !prof [[PROF204:![0-9]+]]
@@ -7524,76 +7524,76 @@ int foobar() {
// DEBUG2: init:
// DEBUG2-NEXT: [[TMP3:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB7:[0-9]+]]), !dbg [[DBG203]]
// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB7]], ptr @_ZZ4mainE2sm, ptr @.__kmpc_global_ctor_..5, ptr null, ptr @.__kmpc_global_dtor_..6), !dbg [[DBG203]]
-// DEBUG2-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB9]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG205:![0-9]+]]
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG206:![0-9]+]]
-// DEBUG2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG206]]
+// DEBUG2-NEXT: [[TMP4:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB9]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG200]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP4]], i32 0, i32 0, !dbg [[DBG205:![0-9]+]]
+// DEBUG2-NEXT: [[TMP5:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG205]]
// DEBUG2-NEXT: invoke void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) @_ZZ4mainE2sm, i32 noundef [[TMP5]])
-// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG207:![0-9]+]]
+// DEBUG2-NEXT: to label [[INVOKE_CONT:%.*]] unwind label [[LPAD:%.*]], !dbg [[DBG206:![0-9]+]]
// DEBUG2: invoke.cont:
// DEBUG2-NEXT: [[TMP6:%.*]] = call i32 @__cxa_atexit(ptr @_ZZ4mainEN5SmainD1Ev, ptr @_ZZ4mainE2sm, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG203]]
// DEBUG2-NEXT: call void @__cxa_guard_release(ptr @_ZGVZ4mainE2sm) #[[ATTR4]], !dbg [[DBG203]]
// DEBUG2-NEXT: br label [[INIT_END]], !dbg [[DBG203]]
// DEBUG2: init.end:
-// DEBUG2-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB11:[0-9]+]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG208:![0-9]+]]
-// DEBUG2-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG209:![0-9]+]]
-// DEBUG2-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG209]]
-// DEBUG2-NEXT: store i32 [[TMP8]], ptr [[RES]], align 4, !dbg [[DBG210:![0-9]+]]
-// DEBUG2-NEXT: [[TMP9:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB13:[0-9]+]], i32 [[TMP0]], ptr @_ZZ4mainE2sm, i64 24, ptr @_ZZ4mainE2sm.cache.), !dbg [[DBG211:![0-9]+]]
-// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG212:![0-9]+]]
-// DEBUG2-NEXT: [[TMP10:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG212]]
-// DEBUG2-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG213:![0-9]+]]
-// DEBUG2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG213]]
-// DEBUG2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG213]]
-// DEBUG2-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB15:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG214:![0-9]+]]
-// DEBUG2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP12]], i32 0, i32 0, !dbg [[DBG215:![0-9]+]]
-// DEBUG2-NEXT: [[TMP13:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG215]]
-// DEBUG2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG216:![0-9]+]]
-// DEBUG2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG216]]
-// DEBUG2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG216]]
-// DEBUG2-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG217:![0-9]+]]
-// DEBUG2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG218:![0-9]+]]
-// DEBUG2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG218]]
-// DEBUG2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG218]]
-// DEBUG2-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB17:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG219:![0-9]+]]
-// DEBUG2-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP17]], i32 0, i32 0, !dbg [[DBG220:![0-9]+]]
-// DEBUG2-NEXT: [[TMP18:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG220]]
-// DEBUG2-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG221:![0-9]+]]
-// DEBUG2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG221]]
-// DEBUG2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG221]]
-// DEBUG2-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB19:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG222:![0-9]+]]
-// DEBUG2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP20]], i64 0, i64 1, !dbg [[DBG222]]
-// DEBUG2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG222]]
-// DEBUG2-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG223:![0-9]+]]
-// DEBUG2-NEXT: [[TMP21:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG223]]
-// DEBUG2-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG224:![0-9]+]]
-// DEBUG2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG224]]
-// DEBUG2-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG224]]
-// DEBUG2-NEXT: [[TMP23:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB21:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG225:![0-9]+]]
-// DEBUG2-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !dbg [[DBG225]]
-// DEBUG2-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG226:![0-9]+]]
-// DEBUG2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP25]], [[TMP24]], !dbg [[DBG226]]
-// DEBUG2-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG226]]
-// DEBUG2-NEXT: [[TMP26:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB23:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG227:![0-9]+]]
-// DEBUG2-NEXT: [[TMP27:%.*]] = load float, ptr [[TMP26]], align 4, !dbg [[DBG227]]
-// DEBUG2-NEXT: [[CONV:%.*]] = fptosi float [[TMP27]] to i32, !dbg [[DBG227]]
-// DEBUG2-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG228:![0-9]+]]
-// DEBUG2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[CONV]], !dbg [[DBG228]]
-// DEBUG2-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG228]]
-// DEBUG2-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB25:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG229:![0-9]+]]
-// DEBUG2-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP29]], i32 0, i32 0, !dbg [[DBG230:![0-9]+]]
-// DEBUG2-NEXT: [[TMP30:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG230]]
-// DEBUG2-NEXT: [[TMP31:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
-// DEBUG2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP31]], [[TMP30]], !dbg [[DBG231]]
-// DEBUG2-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG231]]
-// DEBUG2-NEXT: [[TMP32:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG232:![0-9]+]]
-// DEBUG2-NEXT: ret i32 [[TMP32]], !dbg [[DBG233:![0-9]+]]
+// DEBUG2-NEXT: [[TMP7:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB11:[0-9]+]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG207:![0-9]+]]
+// DEBUG2-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP7]], i32 0, i32 0, !dbg [[DBG208:![0-9]+]]
+// DEBUG2-NEXT: [[TMP8:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG208]]
+// DEBUG2-NEXT: store i32 [[TMP8]], ptr [[RES]], align 4, !dbg [[DBG209:![0-9]+]]
+// DEBUG2-NEXT: [[TMP9:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB13:[0-9]+]], i32 [[TMP0]], ptr @_ZZ4mainE2sm, i64 24, ptr @_ZZ4mainE2sm.cache.), !dbg [[DBG210:![0-9]+]]
+// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[TMP9]], i32 0, i32 0, !dbg [[DBG211:![0-9]+]]
+// DEBUG2-NEXT: [[TMP10:%.*]] = load i32, ptr [[A2]], align 8, !dbg [[DBG211]]
+// DEBUG2-NEXT: [[TMP11:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG212:![0-9]+]]
+// DEBUG2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP11]], [[TMP10]], !dbg [[DBG212]]
+// DEBUG2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG212]]
+// DEBUG2-NEXT: [[TMP12:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB15:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG213:![0-9]+]]
+// DEBUG2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[TMP12]], i32 0, i32 0, !dbg [[DBG214:![0-9]+]]
+// DEBUG2-NEXT: [[TMP13:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG214]]
+// DEBUG2-NEXT: [[TMP14:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG215:![0-9]+]]
+// DEBUG2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP14]], [[TMP13]], !dbg [[DBG215]]
+// DEBUG2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG215]]
+// DEBUG2-NEXT: [[TMP15:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG216:![0-9]+]]
+// DEBUG2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG217:![0-9]+]]
+// DEBUG2-NEXT: [[ADD5:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG217]]
+// DEBUG2-NEXT: store i32 [[ADD5]], ptr [[RES]], align 4, !dbg [[DBG217]]
+// DEBUG2-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB17:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG218:![0-9]+]]
+// DEBUG2-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP17]], i32 0, i32 0, !dbg [[DBG219:![0-9]+]]
+// DEBUG2-NEXT: [[TMP18:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG219]]
+// DEBUG2-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG220:![0-9]+]]
+// DEBUG2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP19]], [[TMP18]], !dbg [[DBG220]]
+// DEBUG2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG220]]
+// DEBUG2-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB19:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG221:![0-9]+]]
+// DEBUG2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP20]], i64 0, i64 1, !dbg [[DBG221]]
+// DEBUG2-NEXT: [[ARRAYIDX8:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG221]]
+// DEBUG2-NEXT: [[A9:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX8]], i32 0, i32 0, !dbg [[DBG222:![0-9]+]]
+// DEBUG2-NEXT: [[TMP21:%.*]] = load i32, ptr [[A9]], align 4, !dbg [[DBG222]]
+// DEBUG2-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG223:![0-9]+]]
+// DEBUG2-NEXT: [[ADD10:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG223]]
+// DEBUG2-NEXT: store i32 [[ADD10]], ptr [[RES]], align 4, !dbg [[DBG223]]
+// DEBUG2-NEXT: [[TMP23:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB21:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG224:![0-9]+]]
+// DEBUG2-NEXT: [[TMP24:%.*]] = load i32, ptr [[TMP23]], align 4, !dbg [[DBG224]]
+// DEBUG2-NEXT: [[TMP25:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG225:![0-9]+]]
+// DEBUG2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP25]], [[TMP24]], !dbg [[DBG225]]
+// DEBUG2-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG225]]
+// DEBUG2-NEXT: [[TMP26:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB23:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG226:![0-9]+]]
+// DEBUG2-NEXT: [[TMP27:%.*]] = load float, ptr [[TMP26]], align 4, !dbg [[DBG226]]
+// DEBUG2-NEXT: [[CONV:%.*]] = fptosi float [[TMP27]] to i32, !dbg [[DBG226]]
+// DEBUG2-NEXT: [[TMP28:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG227:![0-9]+]]
+// DEBUG2-NEXT: [[ADD12:%.*]] = add nsw i32 [[TMP28]], [[CONV]], !dbg [[DBG227]]
+// DEBUG2-NEXT: store i32 [[ADD12]], ptr [[RES]], align 4, !dbg [[DBG227]]
+// DEBUG2-NEXT: [[TMP29:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB25:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG228:![0-9]+]]
+// DEBUG2-NEXT: [[A13:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP29]], i32 0, i32 0, !dbg [[DBG229:![0-9]+]]
+// DEBUG2-NEXT: [[TMP30:%.*]] = load i32, ptr [[A13]], align 4, !dbg [[DBG229]]
+// DEBUG2-NEXT: [[TMP31:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG230:![0-9]+]]
+// DEBUG2-NEXT: [[ADD14:%.*]] = add nsw i32 [[TMP31]], [[TMP30]], !dbg [[DBG230]]
+// DEBUG2-NEXT: store i32 [[ADD14]], ptr [[RES]], align 4, !dbg [[DBG230]]
+// DEBUG2-NEXT: [[TMP32:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG231:![0-9]+]]
+// DEBUG2-NEXT: ret i32 [[TMP32]], !dbg [[DBG232:![0-9]+]]
// DEBUG2: lpad:
// DEBUG2-NEXT: [[TMP33:%.*]] = landingpad { ptr, i32 }
-// DEBUG2-NEXT: cleanup, !dbg [[DBG234:![0-9]+]]
-// DEBUG2-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0, !dbg [[DBG234]]
-// DEBUG2-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG234]]
-// DEBUG2-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1, !dbg [[DBG234]]
-// DEBUG2-NEXT: store i32 [[TMP35]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG234]]
+// DEBUG2-NEXT: cleanup, !dbg [[DBG233:![0-9]+]]
+// DEBUG2-NEXT: [[TMP34:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 0, !dbg [[DBG233]]
+// DEBUG2-NEXT: store ptr [[TMP34]], ptr [[EXN_SLOT]], align 8, !dbg [[DBG233]]
+// DEBUG2-NEXT: [[TMP35:%.*]] = extractvalue { ptr, i32 } [[TMP33]], 1, !dbg [[DBG233]]
+// DEBUG2-NEXT: store i32 [[TMP35]], ptr [[EHSELECTOR_SLOT]], align 4, !dbg [[DBG233]]
// DEBUG2-NEXT: call void @__cxa_guard_abort(ptr @_ZGVZ4mainE2sm) #[[ATTR4]], !dbg [[DBG203]]
// DEBUG2-NEXT: br label [[EH_RESUME:%.*]], !dbg [[DBG203]]
// DEBUG2: eh.resume:
@@ -7605,296 +7605,296 @@ int foobar() {
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..5
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG235:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG234:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
-// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]])
+// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB5:[0-9]+]]), !dbg [[DBG235:![0-9]+]]
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META236:![0-9]+]], metadata !DIExpression()), !dbg [[DBG237:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META236:![0-9]+]], metadata !DIExpression()), !dbg [[DBG237:![0-9]+]]
// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG238:![0-9]+]]
-// DEBUG2-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB5]], i32 [[TMP1]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG239:![0-9]+]]
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG240:![0-9]+]]
-// DEBUG2-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG240]]
-// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) [[TMP2]], i32 noundef [[TMP4]]), !dbg [[DBG241:![0-9]+]]
+// DEBUG2-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB5]], i32 [[TMP1]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG235]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG239:![0-9]+]]
+// DEBUG2-NEXT: [[TMP4:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG239]]
+// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainC1Ei(ptr noundef nonnull align 8 dereferenceable(24) [[TMP2]], i32 noundef [[TMP4]]), !dbg [[DBG240:![0-9]+]]
// DEBUG2-NEXT: [[TMP5:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG238]]
// DEBUG2-NEXT: ret ptr [[TMP5]], !dbg [[DBG238]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC1Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG242:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] align 2 !dbg [[DBG241:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META243:![0-9]+]], metadata !DIExpression()), !dbg [[DBG245:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META242:![0-9]+]], metadata !DIExpression()), !dbg [[DBG244:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META246:![0-9]+]], metadata !DIExpression()), !dbg [[DBG247:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META245:![0-9]+]], metadata !DIExpression()), !dbg [[DBG246:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG248:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG248]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG249:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG247:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainC2Ei(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG247]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG248:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..6
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG250:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG249:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META251:![0-9]+]], metadata !DIExpression()), !dbg [[DBG252:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG252]]
-// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[TMP1]]) #[[ATTR4]], !dbg [[DBG252]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG253:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META250:![0-9]+]], metadata !DIExpression()), !dbg [[DBG251:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG251]]
+// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainD1Ev(ptr noundef nonnull align 8 dereferenceable(24) [[TMP1]]) #[[ATTR4]], !dbg [[DBG251]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG252:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD1Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG254:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG253:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META255:![0-9]+]], metadata !DIExpression()), !dbg [[DBG256:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META254:![0-9]+]], metadata !DIExpression()), !dbg [[DBG255:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]], !dbg [[DBG257:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG258:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZZ4mainEN5SmainD2Ev(ptr noundef nonnull align 8 dereferenceable(24) [[THIS1]]) #[[ATTR4]], !dbg [[DBG256:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG257:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_Z6foobarv
-// DEBUG2-SAME: () #[[ATTR6:[0-9]+]] !dbg [[DBG259:![0-9]+]] {
+// DEBUG2-SAME: () #[[ATTR3]] !dbg [[DBG258:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[RES:%.*]] = alloca i32, align 4
-// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB27:[0-9]+]])
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META260:![0-9]+]], metadata !DIExpression()), !dbg [[DBG261:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB27]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG262:![0-9]+]]
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG263:![0-9]+]]
-// DEBUG2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG263]]
-// DEBUG2-NEXT: store i32 [[TMP2]], ptr [[RES]], align 4, !dbg [[DBG264:![0-9]+]]
-// DEBUG2-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB29:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG265:![0-9]+]]
-// DEBUG2-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG266:![0-9]+]]
-// DEBUG2-NEXT: [[TMP4:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG266]]
-// DEBUG2-NEXT: [[TMP5:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG267:![0-9]+]]
-// DEBUG2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP4]], !dbg [[DBG267]]
-// DEBUG2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG267]]
-// DEBUG2-NEXT: [[TMP6:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG268:![0-9]+]]
-// DEBUG2-NEXT: [[TMP7:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG269:![0-9]+]]
-// DEBUG2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP7]], [[TMP6]], !dbg [[DBG269]]
-// DEBUG2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG269]]
-// DEBUG2-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB31:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG270:![0-9]+]]
-// DEBUG2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP8]], i32 0, i32 0, !dbg [[DBG271:![0-9]+]]
-// DEBUG2-NEXT: [[TMP9:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG271]]
-// DEBUG2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG272:![0-9]+]]
-// DEBUG2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG272]]
-// DEBUG2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG272]]
-// DEBUG2-NEXT: [[TMP11:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB33:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG273:![0-9]+]]
-// DEBUG2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP11]], i64 0, i64 1, !dbg [[DBG273]]
-// DEBUG2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG273]]
-// DEBUG2-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG274:![0-9]+]]
-// DEBUG2-NEXT: [[TMP12:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG274]]
-// DEBUG2-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG275:![0-9]+]]
-// DEBUG2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG275]]
-// DEBUG2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG275]]
-// DEBUG2-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB35:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG276:![0-9]+]]
-// DEBUG2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG276]]
-// DEBUG2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG277:![0-9]+]]
-// DEBUG2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG277]]
-// DEBUG2-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG277]]
-// DEBUG2-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB37:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG278:![0-9]+]]
-// DEBUG2-NEXT: [[TMP18:%.*]] = load float, ptr [[TMP17]], align 4, !dbg [[DBG278]]
-// DEBUG2-NEXT: [[CONV:%.*]] = fptosi float [[TMP18]] to i32, !dbg [[DBG278]]
-// DEBUG2-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG279:![0-9]+]]
-// DEBUG2-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], [[CONV]], !dbg [[DBG279]]
-// DEBUG2-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG279]]
-// DEBUG2-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB39:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG280:![0-9]+]]
-// DEBUG2-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP20]], i32 0, i32 0, !dbg [[DBG281:![0-9]+]]
-// DEBUG2-NEXT: [[TMP21:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG281]]
-// DEBUG2-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG282:![0-9]+]]
-// DEBUG2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG282]]
-// DEBUG2-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG282]]
-// DEBUG2-NEXT: [[TMP23:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG283:![0-9]+]]
-// DEBUG2-NEXT: ret i32 [[TMP23]], !dbg [[DBG284:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB27:[0-9]+]]), !dbg [[DBG259:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[RES]], metadata [[META260:![0-9]+]], metadata !DIExpression()), !dbg [[DBG261:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB27]], i32 [[TMP0]], ptr @_ZN6Static1sE, i64 8, ptr @_ZN6Static1sE.cache.), !dbg [[DBG259]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S3:%.*]], ptr [[TMP1]], i32 0, i32 0, !dbg [[DBG262:![0-9]+]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = load i32, ptr [[A]], align 4, !dbg [[DBG262]]
+// DEBUG2-NEXT: store i32 [[TMP2]], ptr [[RES]], align 4, !dbg [[DBG263:![0-9]+]]
+// DEBUG2-NEXT: [[TMP3:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB29:[0-9]+]], i32 [[TMP0]], ptr @_ZL3gs1, i64 4, ptr @_ZL3gs1.cache.), !dbg [[DBG264:![0-9]+]]
+// DEBUG2-NEXT: [[A1:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[TMP3]], i32 0, i32 0, !dbg [[DBG265:![0-9]+]]
+// DEBUG2-NEXT: [[TMP4:%.*]] = load i32, ptr [[A1]], align 4, !dbg [[DBG265]]
+// DEBUG2-NEXT: [[TMP5:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG266:![0-9]+]]
+// DEBUG2-NEXT: [[ADD:%.*]] = add nsw i32 [[TMP5]], [[TMP4]], !dbg [[DBG266]]
+// DEBUG2-NEXT: store i32 [[ADD]], ptr [[RES]], align 4, !dbg [[DBG266]]
+// DEBUG2-NEXT: [[TMP6:%.*]] = load i32, ptr @_ZL3gs2, align 8, !dbg [[DBG267:![0-9]+]]
+// DEBUG2-NEXT: [[TMP7:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG268:![0-9]+]]
+// DEBUG2-NEXT: [[ADD2:%.*]] = add nsw i32 [[TMP7]], [[TMP6]], !dbg [[DBG268]]
+// DEBUG2-NEXT: store i32 [[ADD2]], ptr [[RES]], align 4, !dbg [[DBG268]]
+// DEBUG2-NEXT: [[TMP8:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB31:[0-9]+]], i32 [[TMP0]], ptr @gs3, i64 12, ptr @gs3.cache.), !dbg [[DBG269:![0-9]+]]
+// DEBUG2-NEXT: [[A3:%.*]] = getelementptr inbounds [[STRUCT_S5:%.*]], ptr [[TMP8]], i32 0, i32 0, !dbg [[DBG270:![0-9]+]]
+// DEBUG2-NEXT: [[TMP9:%.*]] = load i32, ptr [[A3]], align 4, !dbg [[DBG270]]
+// DEBUG2-NEXT: [[TMP10:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG271:![0-9]+]]
+// DEBUG2-NEXT: [[ADD4:%.*]] = add nsw i32 [[TMP10]], [[TMP9]], !dbg [[DBG271]]
+// DEBUG2-NEXT: store i32 [[ADD4]], ptr [[RES]], align 4, !dbg [[DBG271]]
+// DEBUG2-NEXT: [[TMP11:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB33:[0-9]+]], i32 [[TMP0]], ptr @arr_x, i64 24, ptr @arr_x.cache.), !dbg [[DBG272:![0-9]+]]
+// DEBUG2-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [2 x [3 x %struct.S1]], ptr [[TMP11]], i64 0, i64 1, !dbg [[DBG272]]
+// DEBUG2-NEXT: [[ARRAYIDX5:%.*]] = getelementptr inbounds [3 x %struct.S1], ptr [[ARRAYIDX]], i64 0, i64 1, !dbg [[DBG272]]
+// DEBUG2-NEXT: [[A6:%.*]] = getelementptr inbounds [[STRUCT_S1]], ptr [[ARRAYIDX5]], i32 0, i32 0, !dbg [[DBG273:![0-9]+]]
+// DEBUG2-NEXT: [[TMP12:%.*]] = load i32, ptr [[A6]], align 4, !dbg [[DBG273]]
+// DEBUG2-NEXT: [[TMP13:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG274:![0-9]+]]
+// DEBUG2-NEXT: [[ADD7:%.*]] = add nsw i32 [[TMP13]], [[TMP12]], !dbg [[DBG274]]
+// DEBUG2-NEXT: store i32 [[ADD7]], ptr [[RES]], align 4, !dbg [[DBG274]]
+// DEBUG2-NEXT: [[TMP14:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB35:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIiE2stE, i64 4, ptr @_ZN2STIiE2stE.cache.), !dbg [[DBG275:![0-9]+]]
+// DEBUG2-NEXT: [[TMP15:%.*]] = load i32, ptr [[TMP14]], align 4, !dbg [[DBG275]]
+// DEBUG2-NEXT: [[TMP16:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG276:![0-9]+]]
+// DEBUG2-NEXT: [[ADD8:%.*]] = add nsw i32 [[TMP16]], [[TMP15]], !dbg [[DBG276]]
+// DEBUG2-NEXT: store i32 [[ADD8]], ptr [[RES]], align 4, !dbg [[DBG276]]
+// DEBUG2-NEXT: [[TMP17:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB37:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STIfE2stE, i64 4, ptr @_ZN2STIfE2stE.cache.), !dbg [[DBG277:![0-9]+]]
+// DEBUG2-NEXT: [[TMP18:%.*]] = load float, ptr [[TMP17]], align 4, !dbg [[DBG277]]
+// DEBUG2-NEXT: [[CONV:%.*]] = fptosi float [[TMP18]] to i32, !dbg [[DBG277]]
+// DEBUG2-NEXT: [[TMP19:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG278:![0-9]+]]
+// DEBUG2-NEXT: [[ADD9:%.*]] = add nsw i32 [[TMP19]], [[CONV]], !dbg [[DBG278]]
+// DEBUG2-NEXT: store i32 [[ADD9]], ptr [[RES]], align 4, !dbg [[DBG278]]
+// DEBUG2-NEXT: [[TMP20:%.*]] = call ptr @__kmpc_threadprivate_cached(ptr @[[GLOB39:[0-9]+]], i32 [[TMP0]], ptr @_ZN2STI2S4E2stE, i64 8, ptr @_ZN2STI2S4E2stE.cache.), !dbg [[DBG279:![0-9]+]]
+// DEBUG2-NEXT: [[A10:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[TMP20]], i32 0, i32 0, !dbg [[DBG280:![0-9]+]]
+// DEBUG2-NEXT: [[TMP21:%.*]] = load i32, ptr [[A10]], align 4, !dbg [[DBG280]]
+// DEBUG2-NEXT: [[TMP22:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG281:![0-9]+]]
+// DEBUG2-NEXT: [[ADD11:%.*]] = add nsw i32 [[TMP22]], [[TMP21]], !dbg [[DBG281]]
+// DEBUG2-NEXT: store i32 [[ADD11]], ptr [[RES]], align 4, !dbg [[DBG281]]
+// DEBUG2-NEXT: [[TMP23:%.*]] = load i32, ptr [[RES]], align 4, !dbg [[DBG282:![0-9]+]]
+// DEBUG2-NEXT: ret i32 [[TMP23]], !dbg [[DBG283:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@__cxx_global_var_init.7
-// DEBUG2-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG285:![0-9]+]] {
+// DEBUG2-SAME: () #[[ATTR0]] comdat($_ZN2STI2S4E2stE) !dbg [[DBG284:![0-9]+]] {
// DEBUG2-NEXT: entry:
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG286:![0-9]+]]
-// DEBUG2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG286]]
-// DEBUG2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG286]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i8, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG285:![0-9]+]]
+// DEBUG2-NEXT: [[GUARD_UNINITIALIZED:%.*]] = icmp eq i8 [[TMP0]], 0, !dbg [[DBG285]]
+// DEBUG2-NEXT: br i1 [[GUARD_UNINITIALIZED]], label [[INIT_CHECK:%.*]], label [[INIT_END:%.*]], !dbg [[DBG285]]
// DEBUG2: init.check:
-// DEBUG2-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG286]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB41:[0-9]+]]), !dbg [[DBG286]]
-// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB41]], ptr @_ZN2STI2S4E2stE, ptr @.__kmpc_global_ctor_..8, ptr null, ptr @.__kmpc_global_dtor_..9), !dbg [[DBG286]]
-// DEBUG2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG287:![0-9]+]]
-// DEBUG2-NEXT: [[TMP2:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG286]]
-// DEBUG2-NEXT: br label [[INIT_END]], !dbg [[DBG286]]
+// DEBUG2-NEXT: store i8 1, ptr @_ZGVN2STI2S4E2stE, align 8, !dbg [[DBG285]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB41:[0-9]+]]), !dbg [[DBG285]]
+// DEBUG2-NEXT: call void @__kmpc_threadprivate_register(ptr @[[GLOB41]], ptr @_ZN2STI2S4E2stE, ptr @.__kmpc_global_ctor_..8, ptr null, ptr @.__kmpc_global_dtor_..9), !dbg [[DBG285]]
+// DEBUG2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) @_ZN2STI2S4E2stE, i32 noundef 23), !dbg [[DBG286:![0-9]+]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = call i32 @__cxa_atexit(ptr @_ZN2S4D1Ev, ptr @_ZN2STI2S4E2stE, ptr @__dso_handle) #[[ATTR4]], !dbg [[DBG285]]
+// DEBUG2-NEXT: br label [[INIT_END]], !dbg [[DBG285]]
// DEBUG2: init.end:
-// DEBUG2-NEXT: ret void, !dbg [[DBG289:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG288:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_ctor_..8
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG290:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG289:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META291:![0-9]+]], metadata !DIExpression()), !dbg [[DBG292:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG293:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]], i32 noundef 23), !dbg [[DBG294:![0-9]+]]
-// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG293]]
-// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG293]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META290:![0-9]+]], metadata !DIExpression()), !dbg [[DBG291:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG292:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S4C1Ei(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]], i32 noundef 23), !dbg [[DBG293:![0-9]+]]
+// DEBUG2-NEXT: [[TMP2:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG292]]
+// DEBUG2-NEXT: ret ptr [[TMP2]], !dbg [[DBG292]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S4C1Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG295:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR2]] comdat align 2 !dbg [[DBG294:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META296:![0-9]+]], metadata !DIExpression()), !dbg [[DBG298:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META295:![0-9]+]], metadata !DIExpression()), !dbg [[DBG297:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META299:![0-9]+]], metadata !DIExpression()), !dbg [[DBG300:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META298:![0-9]+]], metadata !DIExpression()), !dbg [[DBG299:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG301:![0-9]+]]
-// DEBUG2-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG301]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG302:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG300:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S4C2Ei(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]], i32 noundef [[TMP0]]), !dbg [[DBG300]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG301:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@.__kmpc_global_dtor_..9
-// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG303:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef [[TMP0:%.*]]) #[[ATTR0]] !dbg [[DBG302:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[DOTADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[TMP0]], ptr [[DOTADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META304:![0-9]+]], metadata !DIExpression()), !dbg [[DBG305:![0-9]+]]
-// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG305]]
-// DEBUG2-NEXT: call void @_ZN2S4D1Ev(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]]) #[[ATTR4]], !dbg [[DBG305]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG306:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[DOTADDR]], metadata [[META303:![0-9]+]], metadata !DIExpression()), !dbg [[DBG304:![0-9]+]]
+// DEBUG2-NEXT: [[TMP1:%.*]] = load ptr, ptr [[DOTADDR]], align 8, !dbg [[DBG304]]
+// DEBUG2-NEXT: call void @_ZN2S4D1Ev(ptr noundef nonnull align 4 dereferenceable(8) [[TMP1]]) #[[ATTR4]], !dbg [[DBG304]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG305:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S4D1Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG307:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG306:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META308:![0-9]+]], metadata !DIExpression()), !dbg [[DBG309:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META307:![0-9]+]], metadata !DIExpression()), !dbg [[DBG308:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]], !dbg [[DBG310:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG311:![0-9]+]]
+// DEBUG2-NEXT: call void @_ZN2S4D2Ev(ptr noundef nonnull align 4 dereferenceable(8) [[THIS1]]) #[[ATTR4]], !dbg [[DBG309:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG310:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S1C2Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG312:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG311:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META313:![0-9]+]], metadata !DIExpression()), !dbg [[DBG314:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META312:![0-9]+]], metadata !DIExpression()), !dbg [[DBG313:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META315:![0-9]+]], metadata !DIExpression()), !dbg [[DBG316:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META314:![0-9]+]], metadata !DIExpression()), !dbg [[DBG315:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG317:![0-9]+]]
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG318:![0-9]+]]
-// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG317]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG319:![0-9]+]]
+// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG316:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG317:![0-9]+]]
+// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG316]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG318:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S1D2Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG320:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(4) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG319:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META321:![0-9]+]], metadata !DIExpression()), !dbg [[DBG322:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META320:![0-9]+]], metadata !DIExpression()), !dbg [[DBG321:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG323:![0-9]+]]
-// DEBUG2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG325:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG326:![0-9]+]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S1:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG322:![0-9]+]]
+// DEBUG2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG324:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG325:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S2C2Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG327:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG326:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META328:![0-9]+]], metadata !DIExpression()), !dbg [[DBG329:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META327:![0-9]+]], metadata !DIExpression()), !dbg [[DBG328:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META330:![0-9]+]], metadata !DIExpression()), !dbg [[DBG331:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META329:![0-9]+]], metadata !DIExpression()), !dbg [[DBG330:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG332:![0-9]+]]
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG333:![0-9]+]]
-// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG332]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG334:![0-9]+]]
+// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG331:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG332:![0-9]+]]
+// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG331]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG333:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S2D2Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG335:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(16) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG334:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META336:![0-9]+]], metadata !DIExpression()), !dbg [[DBG337:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META335:![0-9]+]], metadata !DIExpression()), !dbg [[DBG336:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG338:![0-9]+]]
-// DEBUG2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG340:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG341:![0-9]+]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S2:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG337:![0-9]+]]
+// DEBUG2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG339:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG340:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainC2Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG342:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG341:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META343:![0-9]+]], metadata !DIExpression()), !dbg [[DBG344:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META342:![0-9]+]], metadata !DIExpression()), !dbg [[DBG343:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META345:![0-9]+]], metadata !DIExpression()), !dbg [[DBG346:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META344:![0-9]+]], metadata !DIExpression()), !dbg [[DBG345:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG347:![0-9]+]]
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG348:![0-9]+]]
-// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG347]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG349:![0-9]+]]
+// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG346:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG347:![0-9]+]]
+// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 8, !dbg [[DBG346]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG348:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZZ4mainEN5SmainD2Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG350:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 8 dereferenceable(24) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] align 2 !dbg [[DBG349:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META351:![0-9]+]], metadata !DIExpression()), !dbg [[DBG352:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META350:![0-9]+]], metadata !DIExpression()), !dbg [[DBG351:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG353:![0-9]+]]
-// DEBUG2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG355:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG356:![0-9]+]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_SMAIN:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG352:![0-9]+]]
+// DEBUG2-NEXT: store i32 0, ptr [[A]], align 8, !dbg [[DBG354:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG355:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S4C2Ei
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG357:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]], i32 noundef [[A:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG356:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: [[A_ADDR:%.*]] = alloca i32, align 4
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META358:![0-9]+]], metadata !DIExpression()), !dbg [[DBG359:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META357:![0-9]+]], metadata !DIExpression()), !dbg [[DBG358:![0-9]+]]
// DEBUG2-NEXT: store i32 [[A]], ptr [[A_ADDR]], align 4
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META360:![0-9]+]], metadata !DIExpression()), !dbg [[DBG361:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[A_ADDR]], metadata [[META359:![0-9]+]], metadata !DIExpression()), !dbg [[DBG360:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG362:![0-9]+]]
-// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG363:![0-9]+]]
-// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG362]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG364:![0-9]+]]
+// DEBUG2-NEXT: [[A2:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG361:![0-9]+]]
+// DEBUG2-NEXT: [[TMP0:%.*]] = load i32, ptr [[A_ADDR]], align 4, !dbg [[DBG362:![0-9]+]]
+// DEBUG2-NEXT: store i32 [[TMP0]], ptr [[A2]], align 4, !dbg [[DBG361]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG363:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_ZN2S4D2Ev
-// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG365:![0-9]+]] {
+// DEBUG2-SAME: (ptr noundef nonnull align 4 dereferenceable(8) [[THIS:%.*]]) unnamed_addr #[[ATTR3]] comdat align 2 !dbg [[DBG364:![0-9]+]] {
// DEBUG2-NEXT: entry:
// DEBUG2-NEXT: [[THIS_ADDR:%.*]] = alloca ptr, align 8
// DEBUG2-NEXT: store ptr [[THIS]], ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META366:![0-9]+]], metadata !DIExpression()), !dbg [[DBG367:![0-9]+]]
+// DEBUG2-NEXT: tail call void @llvm.dbg.declare(metadata ptr [[THIS_ADDR]], metadata [[META365:![0-9]+]], metadata !DIExpression()), !dbg [[DBG366:![0-9]+]]
// DEBUG2-NEXT: [[THIS1:%.*]] = load ptr, ptr [[THIS_ADDR]], align 8
-// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG368:![0-9]+]]
-// DEBUG2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG370:![0-9]+]]
-// DEBUG2-NEXT: ret void, !dbg [[DBG371:![0-9]+]]
+// DEBUG2-NEXT: [[A:%.*]] = getelementptr inbounds [[STRUCT_S4:%.*]], ptr [[THIS1]], i32 0, i32 0, !dbg [[DBG367:![0-9]+]]
+// DEBUG2-NEXT: store i32 0, ptr [[A]], align 4, !dbg [[DBG369:![0-9]+]]
+// DEBUG2-NEXT: ret void, !dbg [[DBG370:![0-9]+]]
//
//
// DEBUG2-LABEL: define {{[^@]+}}@_GLOBAL__sub_I_threadprivate_codegen.cpp
-// DEBUG2-SAME: () #[[ATTR0]] !dbg [[DBG372:![0-9]+]] {
+// DEBUG2-SAME: () #[[ATTR0]] !dbg [[DBG371:![0-9]+]] {
// DEBUG2-NEXT: entry:
-// DEBUG2-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG373:![0-9]+]]
-// DEBUG2-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG373]]
-// DEBUG2-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG373]]
+// DEBUG2-NEXT: call void @__cxx_global_var_init(), !dbg [[DBG372:![0-9]+]]
+// DEBUG2-NEXT: call void @__cxx_global_var_init.1(), !dbg [[DBG372]]
+// DEBUG2-NEXT: call void @__cxx_global_var_init.2(), !dbg [[DBG372]]
// DEBUG2-NEXT: ret void
//
diff --git a/clang/test/PCH/pack_indexing.cpp b/clang/test/PCH/pack_indexing.cpp
index cf8124617b3c..1c4dac0fd9a3 100644
--- a/clang/test/PCH/pack_indexing.cpp
+++ b/clang/test/PCH/pack_indexing.cpp
@@ -10,7 +10,11 @@ using Type = U...[I];
template <int I, auto...V>
constexpr auto Var = V...[I];
+template <int I, auto...V>
+decltype(V...[I]) foo() { return V...[I]; }
+
void fn1() {
using A = Type<1, int, long, double>;
constexpr auto V = Var<2, 0, 1, 42>;
+ foo<2, 0, 1, 42>();
}
diff --git a/clang/test/Parser/altivec.c b/clang/test/Parser/altivec.c
index 445369f0dc06..9291b9b69160 100644
--- a/clang/test/Parser/altivec.c
+++ b/clang/test/Parser/altivec.c
@@ -56,40 +56,40 @@ void f_a2(int b, vector int a);
vector int v = (vector int)(-1);
// These should have errors on AIX and warnings otherwise.
-__vector long vv_l; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector long vv_l; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector signed long vv_sl; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector signed long vv_sl; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector unsigned long vv_ul; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector unsigned long vv_ul; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector long int vv_li; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector long int vv_li; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector signed long int vv_sli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector signed long int vv_sli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector unsigned long int vv_uli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector unsigned long int vv_uli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector long v_l; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector long v_l; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector signed long v_sl; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector signed long v_sl; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector unsigned long v_ul; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector unsigned long v_ul; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector long int v_li; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector long int v_li; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector signed long int v_sli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector signed long int v_sli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector unsigned long int v_uli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector unsigned long int v_uli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
diff --git a/clang/test/Parser/cxx-altivec.cpp b/clang/test/Parser/cxx-altivec.cpp
index 5cb760dababb..15a6bf6d1be8 100644
--- a/clang/test/Parser/cxx-altivec.cpp
+++ b/clang/test/Parser/cxx-altivec.cpp
@@ -59,40 +59,40 @@ void f_a2(int b, vector int a);
vector int v = (vector int)(-1);
// These should have errors on AIX and warnings otherwise.
-__vector long vv_l; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector long vv_l; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector signed long vv_sl; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector signed long vv_sl; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector unsigned long vv_ul; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector unsigned long vv_ul; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector long int vv_li; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector long int vv_li; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector signed long int vv_sli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector signed long int vv_sli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-__vector unsigned long int vv_uli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+__vector unsigned long int vv_uli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector long v_l; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector long v_l; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector signed long v_sl; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector signed long v_sl; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector unsigned long v_ul; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector unsigned long v_ul; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector long int v_li; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector long int v_li; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector signed long int v_sli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector signed long int v_sli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
-vector unsigned long int v_uli; // nonaix-warning {{Use of 'long' with '__vector' is deprecated}}
+vector unsigned long int v_uli; // nonaix-warning {{use of 'long' with '__vector' is deprecated}}
// aix-error@-1 {{cannot use 'long' with '__vector'}}
// novsx-error@-2 {{cannot use 'long' with '__vector'}}
diff --git a/clang/test/Parser/lax-conv.cpp b/clang/test/Parser/lax-conv.cpp
index f784e3fa74e7..0cb2503a9691 100644
--- a/clang/test/Parser/lax-conv.cpp
+++ b/clang/test/Parser/lax-conv.cpp
@@ -21,10 +21,10 @@ template <typename VEC> VEC __attribute__((noinline)) test(vector unsigned char
return (VEC)(a * b);
}
vector unsigned int test1(vector unsigned char RetImplicitConv) {
- return RetImplicitConv; // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return RetImplicitConv; // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
vector unsigned int test2(vector unsigned char RetImplicitConvAddConst) {
- return RetImplicitConvAddConst + 5; // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return RetImplicitConvAddConst + 5; // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
vector unsigned int test3(vector unsigned char RetExplicitConv) {
return (vector unsigned int)RetExplicitConv;
@@ -34,7 +34,7 @@ vector unsigned int test4(vector unsigned char RetExplicitConvAddConst) {
}
vector unsigned int test5(vector unsigned char RetImplicitConvAddSame1,
vector unsigned char RetImplicitConvAddSame2) {
- return RetImplicitConvAddSame1 + RetImplicitConvAddSame2; // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return RetImplicitConvAddSame1 + RetImplicitConvAddSame2; // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
vector unsigned int test6(vector unsigned char RetExplicitConvAddSame1,
vector unsigned char RetExplicitConvAddSame2) {
@@ -54,10 +54,10 @@ vector unsigned long long test9(vector unsigned char a, vector unsigned char b)
return test<vector unsigned long long>(a, b);
}
void test1a(vector unsigned char ArgImplicitConv) {
- return dummy(ArgImplicitConv); // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return dummy(ArgImplicitConv); // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
void test2a(vector unsigned char ArgImplicitConvAddConst) {
- return dummy(ArgImplicitConvAddConst + 5); // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return dummy(ArgImplicitConvAddConst + 5); // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
void test3a(vector unsigned char ArgExplicitConv) {
return dummy((vector unsigned int)ArgExplicitConv);
@@ -67,7 +67,7 @@ void test4a(vector unsigned char ArgExplicitConvAddConst) {
}
void test5a(vector unsigned char ArgImplicitConvAddSame1,
vector unsigned char ArgImplicitConvAddSame2) {
- return dummy(ArgImplicitConvAddSame1 + ArgImplicitConvAddSame2); // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ return dummy(ArgImplicitConvAddSame1 + ArgImplicitConvAddSame2); // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
void test6a(vector unsigned char ArgExplicitConvAddSame1,
vector unsigned char ArgExplicitConvAddSame2) {
@@ -80,33 +80,33 @@ void test7a(vector unsigned char ArgExplicitConvAddSame1Full,
ArgExplicitConvAddSame2Full));
}
void test_bool_compat(void) {
- vbs = vss; // expected-warning {{Implicit conversion between vector types (''__vector short' (vector of 8 'short' values)' and ''__vector __bool unsigned short' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vbs = vus; // expected-warning {{Implicit conversion between vector types (''__vector unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __bool unsigned short' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vbs = vss; // expected-warning {{implicit conversion between vector types (''__vector short' (vector of 8 'short' values)' and ''__vector __bool unsigned short' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vbs = vus; // expected-warning {{implicit conversion between vector types (''__vector unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __bool unsigned short' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vbi = vsi; // expected-warning {{Implicit conversion between vector types (''__vector int' (vector of 4 'int' values)' and ''__vector __bool unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vbi = vui; // expected-warning {{Implicit conversion between vector types (''__vector unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __bool unsigned int' (vector of 4 'unsigned int' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vbi = vsi; // expected-warning {{implicit conversion between vector types (''__vector int' (vector of 4 'int' values)' and ''__vector __bool unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vbi = vui; // expected-warning {{implicit conversion between vector types (''__vector unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __bool unsigned int' (vector of 4 'unsigned int' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vbl = vsl; // expected-warning {{Implicit conversion between vector types (''__vector long long' (vector of 2 'long long' values)' and ''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vbl = vul; // expected-warning {{Implicit conversion between vector types (''__vector unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vbl = vsl; // expected-warning {{implicit conversion between vector types (''__vector long long' (vector of 2 'long long' values)' and ''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vbl = vul; // expected-warning {{implicit conversion between vector types (''__vector unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vbc = vsc; // expected-warning {{Implicit conversion between vector types (''__vector signed char' (vector of 16 'signed char' values)' and ''__vector __bool unsigned char' (vector of 16 'unsigned char' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vbc = vuc; // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __bool unsigned char' (vector of 16 'unsigned char' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vbc = vsc; // expected-warning {{implicit conversion between vector types (''__vector signed char' (vector of 16 'signed char' values)' and ''__vector __bool unsigned char' (vector of 16 'unsigned char' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vbc = vuc; // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __bool unsigned char' (vector of 16 'unsigned char' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
void test_pixel_compat(void) {
- vp = vbs; // expected-warning {{Implicit conversion between vector types (''__vector __bool unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vss; // expected-warning {{Implicit conversion between vector types (''__vector short' (vector of 8 'short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vus; // expected-warning {{Implicit conversion between vector types (''__vector unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vp = vbs; // expected-warning {{implicit conversion between vector types (''__vector __bool unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vss; // expected-warning {{implicit conversion between vector types (''__vector short' (vector of 8 'short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vus; // expected-warning {{implicit conversion between vector types (''__vector unsigned short' (vector of 8 'unsigned short' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vp = vbi; // expected-warning {{Implicit conversion between vector types (''__vector __bool unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vsi; // expected-warning {{Implicit conversion between vector types (''__vector int' (vector of 4 'int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vui; // expected-warning {{Implicit conversion between vector types (''__vector unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vp = vbi; // expected-warning {{implicit conversion between vector types (''__vector __bool unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vsi; // expected-warning {{implicit conversion between vector types (''__vector int' (vector of 4 'int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vui; // expected-warning {{implicit conversion between vector types (''__vector unsigned int' (vector of 4 'unsigned int' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vp = vbl; // expected-warning {{Implicit conversion between vector types (''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vsl; // expected-warning {{Implicit conversion between vector types (''__vector long long' (vector of 2 'long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vul; // expected-warning {{Implicit conversion between vector types (''__vector unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vp = vbl; // expected-warning {{implicit conversion between vector types (''__vector __bool unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vsl; // expected-warning {{implicit conversion between vector types (''__vector long long' (vector of 2 'long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vul; // expected-warning {{implicit conversion between vector types (''__vector unsigned long long' (vector of 2 'unsigned long long' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
- vp = vbc; // expected-warning {{Implicit conversion between vector types (''__vector __bool unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vsc; // expected-warning {{Implicit conversion between vector types (''__vector signed char' (vector of 16 'signed char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
- vp = vuc; // expected-warning {{Implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated. In the future, the behavior implied by '-fno-lax-vector-conversions' will be the default.}}
+ vp = vbc; // expected-warning {{implicit conversion between vector types (''__vector __bool unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vsc; // expected-warning {{implicit conversion between vector types (''__vector signed char' (vector of 16 'signed char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
+ vp = vuc; // expected-warning {{implicit conversion between vector types (''__vector unsigned char' (vector of 16 'unsigned char' values)' and ''__vector __pixel ' (vector of 8 'unsigned short' values)') is deprecated; in the future, the behavior implied by '-fno-lax-vector-conversions' will be the default}}
}
diff --git a/clang/test/ParserOpenACC/parse-clauses.c b/clang/test/ParserOpenACC/parse-clauses.c
index 694f28b86ec9..49e749feb2ec 100644
--- a/clang/test/ParserOpenACC/parse-clauses.c
+++ b/clang/test/ParserOpenACC/parse-clauses.c
@@ -831,52 +831,38 @@ void ReductionClauseParsing() {
// expected-error@+1{{expected '('}}
#pragma acc serial reduction
for(;;){}
- // expected-error@+3{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
- // expected-error@+2{{expected expression}}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
+ // expected-error@+2{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
+ // expected-error@+1{{expected expression}}
#pragma acc serial reduction()
for(;;){}
- // expected-error@+2{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
+ // expected-error@+1{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
#pragma acc serial reduction(Begin)
for(;;){}
- // expected-error@+2{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
+ // expected-error@+1{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
#pragma acc serial reduction(Begin, End)
for(;;){}
- // expected-error@+2{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
+ // expected-error@+1{{missing reduction operator, expected '+', '*', 'max', 'min', '&', '|', '^', '&&', or '||', follwed by a ':'}}
#pragma acc serial reduction(Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(+:Begin)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(+:Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(*: Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(max : Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(min: Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(&: Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(|: Begin, End)
for(;;){}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
#pragma acc serial reduction(^: Begin, End)
for(;;){}
- // expected-warning@+2{{OpenACC clause 'seq' not yet implemented, clause ignored}}
- // expected-warning@+1{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
+ // expected-warning@+1{{OpenACC clause 'seq' not yet implemented, clause ignored}}
#pragma acc serial seq, reduction(&&: Begin, End)
for(;;){}
- // expected-warning@+2{{OpenACC clause 'reduction' not yet implemented, clause ignored}}
// expected-warning@+1{{OpenACC clause 'seq' not yet implemented, clause ignored}}
#pragma acc serial reduction(||: Begin, End), seq
for(;;){}
diff --git a/clang/test/Preprocessor/riscv-target-features.c b/clang/test/Preprocessor/riscv-target-features.c
index 913093bb51db..0865add7e8fb 100644
--- a/clang/test/Preprocessor/riscv-target-features.c
+++ b/clang/test/Preprocessor/riscv-target-features.c
@@ -79,7 +79,9 @@
// CHECK-NOT: __riscv_xventanacondops {{.*$}}
// CHECK-NOT: __riscv_za128rs {{.*$}}
// CHECK-NOT: __riscv_za64rs {{.*$}}
+// CHECK-NOT: __riscv_zaamo {{.*$}}
// CHECK-NOT: __riscv_zacas {{.*$}}
+// CHECK-NOT: __riscv_zalrsc {{.*$}}
// CHECK-NOT: __riscv_zama16b {{.*$}}
// CHECK-NOT: __riscv_zawrs {{.*$}}
// CHECK-NOT: __riscv_zba {{.*$}}
@@ -174,10 +176,8 @@
// CHECK-NOT: __riscv_sspm{{.*$}}
// CHECK-NOT: __riscv_ssqosid{{.*$}}
// CHECK-NOT: __riscv_supm{{.*$}}
-// CHECK-NOT: __riscv_zaamo {{.*$}}
// CHECK-NOT: __riscv_zabha {{.*$}}
// CHECK-NOT: __riscv_zalasr {{.*$}}
-// CHECK-NOT: __riscv_zalrsc {{.*$}}
// CHECK-NOT: __riscv_zfbfmin {{.*$}}
// CHECK-NOT: __riscv_zicfilp {{.*$}}
// CHECK-NOT: __riscv_zicfiss {{.*$}}
@@ -708,6 +708,14 @@
// CHECK-ZA64RS-EXT: __riscv_za64rs 1000000{{$}}
// RUN: %clang --target=riscv32 \
+// RUN: -march=rv32i_zaamo1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZAAMO-EXT %s
+// RUN: %clang --target=riscv64 \
+// RUN: -march=rv64i_zaamo1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZAAMO-EXT %s
+// CHECK-ZAAMO-EXT: __riscv_zaamo 1000000{{$}}
+
+// RUN: %clang --target=riscv32 \
// RUN: -march=rv32ia_zacas1p0 -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
// RUN: %clang --target=riscv64 \
@@ -715,6 +723,14 @@
// RUN: -o - | FileCheck --check-prefix=CHECK-ZACAS-EXT %s
// CHECK-ZACAS-EXT: __riscv_zacas 1000000{{$}}
+// RUN: %clang --target=riscv32 \
+// RUN: -march=rv32i_zalrsc1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZALRSC-EXT %s
+// RUN: %clang --target=riscv64 \
+// RUN: -march=rv64i_zalrsc1p0 -E -dM %s \
+// RUN: -o - | FileCheck --check-prefix=CHECK-ZALRSC-EXT %s
+// CHECK-ZALRSC-EXT: __riscv_zalrsc 1000000{{$}}
+
// RUN: %clang --target=riscv32 -march=rv32izama16b -x c -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZAMA16B-EXT %s
// RUN: %clang --target=riscv64 -march=rv64izama16b -x c -E -dM %s \
@@ -1555,14 +1571,6 @@
// Experimental extensions
// RUN: %clang --target=riscv32 -menable-experimental-extensions \
-// RUN: -march=rv32i_zaamo0p2 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZAAMO-EXT %s
-// RUN: %clang --target=riscv64 -menable-experimental-extensions \
-// RUN: -march=rv64i_zaamo0p2 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZAAMO-EXT %s
-// CHECK-ZAAMO-EXT: __riscv_zaamo 2000{{$}}
-
-// RUN: %clang --target=riscv32 -menable-experimental-extensions \
// RUN: -march=rv32ia_zabha1p0 -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZABHA-EXT %s
// RUN: %clang --target=riscv64 -menable-experimental-extensions \
@@ -1579,14 +1587,6 @@
// CHECK-ZALASR-EXT: __riscv_zalasr 1000{{$}}
// RUN: %clang --target=riscv32 -menable-experimental-extensions \
-// RUN: -march=rv32i_zalrsc0p2 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZALRSC-EXT %s
-// RUN: %clang --target=riscv64 -menable-experimental-extensions \
-// RUN: -march=rv64i_zalrsc0p2 -E -dM %s \
-// RUN: -o - | FileCheck --check-prefix=CHECK-ZALRSC-EXT %s
-// CHECK-ZALRSC-EXT: __riscv_zalrsc 2000{{$}}
-
-// RUN: %clang --target=riscv32 -menable-experimental-extensions \
// RUN: -march=rv32izfbfmin1p0 -E -dM %s \
// RUN: -o - | FileCheck --check-prefix=CHECK-ZFBFMIN-EXT %s
// RUN: %clang --target=riscv64 -menable-experimental-extensions \
diff --git a/clang/test/Sema/builtins.c b/clang/test/Sema/builtins.c
index 3bee31459529..4f843aeec24e 100644
--- a/clang/test/Sema/builtins.c
+++ b/clang/test/Sema/builtins.c
@@ -277,9 +277,9 @@ void test21(const int *ptr) {
}
void test_ei_i42i(_BitInt(42) *ptr, int value) {
- __sync_fetch_and_add(ptr, value); // expected-error {{Atomic memory operand must have a power-of-two size}}
+ __sync_fetch_and_add(ptr, value); // expected-error {{atomic memory operand must have a power-of-two size}}
// expected-warning@+1 {{the semantics of this intrinsic changed with GCC version 4.4 - the newer semantics are provided here}}
- __sync_nand_and_fetch(ptr, value); // expected-error {{Atomic memory operand must have a power-of-two size}}
+ __sync_nand_and_fetch(ptr, value); // expected-error {{atomic memory operand must have a power-of-two size}}
__atomic_fetch_add(ptr, 1, 0); // expected-error {{argument to atomic builtin of type '_BitInt' is not supported}}
}
@@ -305,9 +305,9 @@ void test_ei_ii64(int *ptr, _BitInt(64) value) {
}
void test_ei_i42i42(_BitInt(42) *ptr, _BitInt(42) value) {
- __sync_fetch_and_add(ptr, value); // expected-error {{Atomic memory operand must have a power-of-two size}}
+ __sync_fetch_and_add(ptr, value); // expected-error {{atomic memory operand must have a power-of-two size}}
// expected-warning@+1 {{the semantics of this intrinsic changed with GCC version 4.4 - the newer semantics are provided here}}
- __sync_nand_and_fetch(ptr, value); // expected-error {{Atomic memory operand must have a power-of-two size}}
+ __sync_nand_and_fetch(ptr, value); // expected-error {{atomic memory operand must have a power-of-two size}}
}
void test_ei_i64i64(_BitInt(64) *ptr, _BitInt(64) value) {
diff --git a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
index 285532e3d80d..4c6ef5adae7d 100644
--- a/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
+++ b/clang/test/SemaCXX/cxx20-ctad-type-alias.cpp
@@ -173,6 +173,11 @@ template <typename... Ts>
using AFoo = Foo<Ts...>;
auto b = AFoo{};
+AFoo a(1, 2);
+
+template <typename T>
+using BFoo = Foo<T, T>;
+BFoo b2(1.0, 2.0);
} // namespace test13
namespace test14 {
diff --git a/clang/test/SemaCXX/cxx2c-pack-indexing.cpp b/clang/test/SemaCXX/cxx2c-pack-indexing.cpp
index 0ac85b5bcc14..28b9765127f4 100644
--- a/clang/test/SemaCXX/cxx2c-pack-indexing.cpp
+++ b/clang/test/SemaCXX/cxx2c-pack-indexing.cpp
@@ -206,13 +206,17 @@ void test(auto...args){
template<int... args>
void test2(){
[&]<int idx>(){
- using R = decltype( args...[idx] ) ;
- }.template operator()<0>();
+ using R = decltype( args...[idx] ) ; // #test2-R
+ }.template operator()<0>(); // #test2-call
}
void f( ) {
test(1);
test2<1>();
+ test2();
+ // expected-error@#test2-R {{invalid index 0 for pack args of size 0}}
+ // expected-note@#test2-call {{requested here}}
+ // expected-note@-3 {{requested here}}
}
diff --git a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
index 749d9e135d94..73cc946ca0ce 100644
--- a/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
+++ b/clang/test/SemaCXX/warn-thread-safety-analysis.cpp
@@ -5838,12 +5838,12 @@ class Foo5 {
class Foo6 {
- Mutex mu1 ACQUIRED_AFTER(mu3); // expected-warning {{Cycle in acquired_before/after dependencies, starting with 'mu1'}}
- Mutex mu2 ACQUIRED_AFTER(mu1); // expected-warning {{Cycle in acquired_before/after dependencies, starting with 'mu2'}}
- Mutex mu3 ACQUIRED_AFTER(mu2); // expected-warning {{Cycle in acquired_before/after dependencies, starting with 'mu3'}}
+ Mutex mu1 ACQUIRED_AFTER(mu3); // expected-warning {{cycle in acquired_before/after dependencies, starting with 'mu1'}}
+ Mutex mu2 ACQUIRED_AFTER(mu1); // expected-warning {{cycle in acquired_before/after dependencies, starting with 'mu2'}}
+ Mutex mu3 ACQUIRED_AFTER(mu2); // expected-warning {{cycle in acquired_before/after dependencies, starting with 'mu3'}}
- Mutex mu_b ACQUIRED_BEFORE(mu_b); // expected-warning {{Cycle in acquired_before/after dependencies, starting with 'mu_b'}}
- Mutex mu_a ACQUIRED_AFTER(mu_a); // expected-warning {{Cycle in acquired_before/after dependencies, starting with 'mu_a'}}
+ Mutex mu_b ACQUIRED_BEFORE(mu_b); // expected-warning {{cycle in acquired_before/after dependencies, starting with 'mu_b'}}
+ Mutex mu_a ACQUIRED_AFTER(mu_a); // expected-warning {{cycle in acquired_before/after dependencies, starting with 'mu_a'}}
void test0() {
mu_a.Lock();
diff --git a/clang/test/SemaOpenACC/compute-construct-attach-clause.c b/clang/test/SemaOpenACC/compute-construct-attach-clause.c
index de735308528a..deca99f5bae4 100644
--- a/clang/test/SemaOpenACC/compute-construct-attach-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-attach-clause.c
@@ -16,7 +16,7 @@ void uses() {
#pragma acc parallel attach(LocalInt)
while (1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel attach(&LocalInt)
while (1);
diff --git a/clang/test/SemaOpenACC/compute-construct-clause-ast.cpp b/clang/test/SemaOpenACC/compute-construct-clause-ast.cpp
index 6d2efcf81eb6..69f65f4083ae 100644
--- a/clang/test/SemaOpenACC/compute-construct-clause-ast.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-clause-ast.cpp
@@ -40,6 +40,89 @@ void NormalFunc(int i, float f) {
// CHECK-NEXT: WhileStmt
// CHECK-NEXT: CXXBoolLiteralExpr
// CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(+: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: +
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'int' lvalue ParmVar{{.*}} 'i' 'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(*: f)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: *
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'float' lvalue ParmVar{{.*}} 'f' 'float'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(max: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: max
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'int' lvalue ParmVar{{.*}} 'i' 'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(min: f)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: min
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'float' lvalue ParmVar{{.*}} 'f' 'float'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(&: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: &
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'int' lvalue ParmVar{{.*}} 'i' 'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(|: f)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: |
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'float' lvalue ParmVar{{.*}} 'f' 'float'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+
+#pragma acc parallel reduction(^: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ^
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'int' lvalue ParmVar{{.*}} 'i' 'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(&&: f)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: &&
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'float' lvalue ParmVar{{.*}} 'f' 'float'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+
+#pragma acc parallel reduction(||: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ||
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'int' lvalue ParmVar{{.*}} 'i' 'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
}
template<typename T>
@@ -154,6 +237,98 @@ void TemplFunc() {
// CHECK-NEXT: CXXBoolLiteralExpr
// CHECK-NEXT: NullStmt
+ T t;
+ // CHECK-NEXT: DeclStmt
+ // CHECK-NEXT: VarDecl{{.*}} t 'T'
+
+#pragma acc parallel reduction(+: t)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: +
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'T' lvalue Var{{.*}} 't' 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(*: T::SomeFloat)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: *
+ // CHECK-NEXT: DependentScopeDeclRefExpr{{.*}} '<dependent type>' lvalue
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ typename T::IntTy i;
+ // CHECK-NEXT: DeclStmt
+ // CHECK-NEXT: VarDecl{{.*}} i 'typename T::IntTy'
+
+#pragma acc parallel reduction(max: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: max
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename T::IntTy' lvalue Var{{.*}} 'i' 'typename T::IntTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(min: t)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: min
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'T' lvalue Var{{.*}} 't' 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(&: T::SomeFloat)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: &
+ // CHECK-NEXT: DependentScopeDeclRefExpr{{.*}} '<dependent type>' lvalue
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(|: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: |
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename T::IntTy' lvalue Var{{.*}} 'i' 'typename T::IntTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(^: t)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ^
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'T' lvalue Var{{.*}} 't' 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc serial reduction(&&: T::SomeFloat)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: &&
+ // CHECK-NEXT: DependentScopeDeclRefExpr{{.*}} '<dependent type>' lvalue
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'T'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+#pragma acc parallel reduction(||: i)
+ while(true);
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ||
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename T::IntTy' lvalue Var{{.*}} 'i' 'typename T::IntTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
// Match the instantiation:
// CHECK: FunctionDecl{{.*}}TemplFunc{{.*}}implicit_instantiation
// CHECK-NEXT: TemplateArgument type 'InstTy'
@@ -262,6 +437,79 @@ void TemplFunc() {
// CHECK-NEXT: CXXBoolLiteralExpr
// CHECK-NEXT: NullStmt
+ // CHECK-NEXT: DeclStmt
+ // CHECK-NEXT: VarDecl{{.*}} t 'InstTy'
+ // CHECK-NEXT: CXXConstructExpr
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: +
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'InstTy' lvalue Var{{.*}} 't' 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: *
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'const float' lvalue Var{{.*}} 'SomeFloat' 'const float'
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: DeclStmt
+ // CHECK-NEXT: VarDecl{{.*}} i 'typename InstTy::IntTy':'int'
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: max
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename InstTy::IntTy':'int' lvalue Var{{.*}} 'i' 'typename InstTy::IntTy':'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: min
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'InstTy' lvalue Var{{.*}} 't' 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: &
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'const float' lvalue Var{{.*}} 'SomeFloat' 'const float'
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: |
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename InstTy::IntTy':'int' lvalue Var{{.*}} 'i' 'typename InstTy::IntTy':'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ^
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'InstTy' lvalue Var{{.*}} 't' 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}serial
+ // CHECK-NEXT: reduction clause Operator: &&
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'const float' lvalue Var{{.*}} 'SomeFloat' 'const float'
+ // CHECK-NEXT: NestedNameSpecifier TypeSpec 'InstTy'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
+ // CHECK-NEXT: OpenACCComputeConstruct{{.*}}parallel
+ // CHECK-NEXT: reduction clause Operator: ||
+ // CHECK-NEXT: DeclRefExpr{{.*}} 'typename InstTy::IntTy':'int' lvalue Var{{.*}} 'i' 'typename InstTy::IntTy':'int'
+ // CHECK-NEXT: WhileStmt
+ // CHECK-NEXT: CXXBoolLiteralExpr
+ // CHECK-NEXT: NullStmt
+
}
struct BoolConversion{ operator bool() const;};
diff --git a/clang/test/SemaOpenACC/compute-construct-copy-clause.c b/clang/test/SemaOpenACC/compute-construct-copy-clause.c
index accbe43cea40..2b43480be8b4 100644
--- a/clang/test/SemaOpenACC/compute-construct-copy-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-copy-clause.c
@@ -36,11 +36,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel copy(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(+IntParam)
while(1);
@@ -53,10 +53,10 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy((float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-copy-clause.cpp b/clang/test/SemaOpenACC/compute-construct-copy-clause.cpp
index 16e78a43026a..2797927e6e56 100644
--- a/clang/test/SemaOpenACC/compute-construct-copy-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-copy-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel copy(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel copy(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copy(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel copy(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-copyin-clause.c b/clang/test/SemaOpenACC/compute-construct-copyin-clause.c
index 6f200b357f52..5ea4db9e5fae 100644
--- a/clang/test/SemaOpenACC/compute-construct-copyin-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-copyin-clause.c
@@ -38,11 +38,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel copyin(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(+IntParam)
while(1);
@@ -55,14 +55,14 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin((float)ArrayParam[2])
while(1);
// expected-error@+2{{invalid tag 'invalid' on 'copyin' clause}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(invalid:(float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-copyin-clause.cpp b/clang/test/SemaOpenACC/compute-construct-copyin-clause.cpp
index 79275e701161..74ce74a1368d 100644
--- a/clang/test/SemaOpenACC/compute-construct-copyin-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-copyin-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel copyin(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel copyin(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyin(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel copyin(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-copyout-clause.c b/clang/test/SemaOpenACC/compute-construct-copyout-clause.c
index 38a50f8373e8..a035ab3242e3 100644
--- a/clang/test/SemaOpenACC/compute-construct-copyout-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-copyout-clause.c
@@ -38,11 +38,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel copyout(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(+IntParam)
while(1);
@@ -55,14 +55,14 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout((float)ArrayParam[2])
while(1);
// expected-error@+2{{invalid tag 'invalid' on 'copyout' clause}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(invalid:(float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-copyout-clause.cpp b/clang/test/SemaOpenACC/compute-construct-copyout-clause.cpp
index 3d05a5670092..c01dc1a39963 100644
--- a/clang/test/SemaOpenACC/compute-construct-copyout-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-copyout-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel copyout(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel copyout(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel copyout(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel copyout(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-create-clause.c b/clang/test/SemaOpenACC/compute-construct-create-clause.c
index 9c94e3a1a407..5cfa9b0c5cc3 100644
--- a/clang/test/SemaOpenACC/compute-construct-create-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-create-clause.c
@@ -39,11 +39,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel create(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(+IntParam)
while(1);
@@ -56,14 +56,14 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create((float)ArrayParam[2])
while(1);
// expected-error@+2{{invalid tag 'invalid' on 'create' clause}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(invalid:(float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-create-clause.cpp b/clang/test/SemaOpenACC/compute-construct-create-clause.cpp
index d0323620b8f7..3ed1e1e9f700 100644
--- a/clang/test/SemaOpenACC/compute-construct-create-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-create-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel create(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel create(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel create(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel create(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-device_type-clause.c b/clang/test/SemaOpenACC/compute-construct-device_type-clause.c
index 15c9cf396c80..bf2a00a0f736 100644
--- a/clang/test/SemaOpenACC/compute-construct-device_type-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-device_type-clause.c
@@ -182,7 +182,7 @@ void uses() {
while(1);
// expected-error@+2{{OpenACC clause 'reduction' may not follow a 'device_type' clause in a compute construct}}
// expected-note@+1{{previous clause is here}}
-#pragma acc kernels device_type(*) reduction(+:Var)
+#pragma acc serial device_type(*) reduction(+:Var)
while(1);
// expected-error@+2{{OpenACC clause 'collapse' may not follow a 'device_type' clause in a compute construct}}
// expected-note@+1{{previous clause is here}}
diff --git a/clang/test/SemaOpenACC/compute-construct-deviceptr-clause.c b/clang/test/SemaOpenACC/compute-construct-deviceptr-clause.c
index e5d328eb0b28..ae8269b9779a 100644
--- a/clang/test/SemaOpenACC/compute-construct-deviceptr-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-deviceptr-clause.c
@@ -16,7 +16,7 @@ void uses() {
#pragma acc parallel deviceptr(LocalInt)
while (1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel deviceptr(&LocalInt)
while (1);
diff --git a/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.c b/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.c
index 4e057bf32c2d..eacda7bbbbba 100644
--- a/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.c
@@ -29,11 +29,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel firstprivate(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate(+IntParam)
while(1);
@@ -46,10 +46,10 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate((float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.cpp b/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.cpp
index 2fbb80f7b2fb..161e4012c08d 100644
--- a/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-firstprivate-clause.cpp
@@ -32,11 +32,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel firstprivate(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate(+IntParam)
while(1);
@@ -49,27 +49,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel firstprivate((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel private(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(t, I)
while(true);
@@ -94,7 +94,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel private(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-no_create-clause.c b/clang/test/SemaOpenACC/compute-construct-no_create-clause.c
index 07a60b73c34f..4ff06eaf132b 100644
--- a/clang/test/SemaOpenACC/compute-construct-no_create-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-no_create-clause.c
@@ -28,11 +28,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel no_create(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(+IntParam)
while(1);
@@ -45,10 +45,10 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create((float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-no_create-clause.cpp b/clang/test/SemaOpenACC/compute-construct-no_create-clause.cpp
index 3820d5e3999d..fa84b1fbeda0 100644
--- a/clang/test/SemaOpenACC/compute-construct-no_create-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-no_create-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel no_create(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel no_create(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel no_create(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel no_create(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-present-clause.c b/clang/test/SemaOpenACC/compute-construct-present-clause.c
index 99c4b1dcd19b..1d50a6b1275b 100644
--- a/clang/test/SemaOpenACC/compute-construct-present-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-present-clause.c
@@ -28,11 +28,11 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
#pragma acc parallel present(LocalComposite.ScalarMember, LocalComposite.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(+IntParam)
while(1);
@@ -45,10 +45,10 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present((float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-present-clause.cpp b/clang/test/SemaOpenACC/compute-construct-present-clause.cpp
index 62e481dea3e2..db230d0b1d9d 100644
--- a/clang/test/SemaOpenACC/compute-construct-present-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-present-clause.cpp
@@ -31,11 +31,11 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
#pragma acc parallel present(LocalComposite2.ScalarMember, LocalComposite2.ScalarMember)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(1 + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(+IntParam)
while(1);
@@ -48,27 +48,27 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present((float)ArrayParam[2])
while(1);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(+t)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel present(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel present(t, I)
while(true);
@@ -93,7 +93,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel present(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-private-clause.c b/clang/test/SemaOpenACC/compute-construct-private-clause.c
index d2615c384cdb..3e6dbaafbc6f 100644
--- a/clang/test/SemaOpenACC/compute-construct-private-clause.c
+++ b/clang/test/SemaOpenACC/compute-construct-private-clause.c
@@ -89,13 +89,13 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
// Invalid cases, arbitrary expressions.
struct Incomplete *I;
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(*I)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(GlobalInt + IntParam)
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(+GlobalInt)
while(1);
@@ -128,10 +128,10 @@ void uses(int IntParam, short *PointerParam, float ArrayParam[5], Complete Compo
while(1);
// expected-error@+2{{OpenACC sub-array specified range [2:5] would be out of the range of the subscripted array size of 5}}
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private((float*)ArrayParam[2:5])
while(1);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private((float)ArrayParam[2])
while(1);
}
diff --git a/clang/test/SemaOpenACC/compute-construct-private-clause.cpp b/clang/test/SemaOpenACC/compute-construct-private-clause.cpp
index a776b16f0feb..fb9e89a21acc 100644
--- a/clang/test/SemaOpenACC/compute-construct-private-clause.cpp
+++ b/clang/test/SemaOpenACC/compute-construct-private-clause.cpp
@@ -64,34 +64,34 @@ void uses(int IntParam, char *PointerParam, float ArrayParam[5], Complete Compos
// Invalid cases, arbitrary expressions.
Incomplete *I;
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(*I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(GlobalInt + IntParam)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(+GlobalInt)
while(true);
}
template<typename T, unsigned I, typename V>
void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(+t)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(+I)
while(true);
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#TEMPL_USES_INST{{in instantiation of}}
#pragma acc parallel private(I)
while(true);
- // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
#pragma acc parallel private(t, I)
while(true);
@@ -120,7 +120,7 @@ void TemplUses(T t, T (&arrayT)[I], V TemplComp) {
template<unsigned I, auto &NTTP_REF>
void NTTP() {
// NTTP's are only valid if it is a reference to something.
- // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+ // expected-error@+2{{OpenACC variable is not a valid variable name, sub-array, array element, member of a composite variable, or composite variable member}}
// expected-note@#NTTP_INST{{in instantiation of}}
#pragma acc parallel private(I)
while(true);
diff --git a/clang/test/SemaOpenACC/compute-construct-reduction-clause.c b/clang/test/SemaOpenACC/compute-construct-reduction-clause.c
new file mode 100644
index 000000000000..9c0debd34503
--- /dev/null
+++ b/clang/test/SemaOpenACC/compute-construct-reduction-clause.c
@@ -0,0 +1,107 @@
+// RUN: %clang_cc1 %s -fopenacc -verify
+
+struct CompositeOfScalars {
+ int I;
+ float F;
+ short J;
+ char C;
+ double D;
+ _Complex float CF;
+ _Complex double CD;
+};
+
+struct CompositeHasComposite {
+ int I;
+ float F;
+ short J;
+ char C;
+ double D;
+ _Complex float CF;
+ _Complex double CD;
+ struct CompositeOfScalars COS; // #COS_FIELD
+};
+
+void uses(unsigned Parm) {
+ float Var;
+ int IVar;
+
+#pragma acc parallel reduction(+:Parm)
+ while (1);
+#pragma acc serial reduction(+:Parm)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' clause is not valid on 'kernels' directive}}
+#pragma acc kernels reduction(+:Parm)
+ while (1);
+
+ // On a 'parallel', 'num_gangs' cannot have >1 args. num_gangs not valid on
+ // 'serial', but 'reduction' not valid on 'kernels', other combos cannot be
+ // tested.
+#pragma acc parallel reduction(+:Parm) num_gangs(IVar)
+ while (1);
+#pragma acc parallel num_gangs(IVar) reduction(+:IVar)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel reduction(+:Parm) num_gangs(Parm, IVar)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel num_gangs(Parm, IVar) reduction(+:Var)
+ while (1);
+
+ struct CompositeOfScalars CoS;
+ struct CompositeOfScalars *CoSPtr;
+ struct CompositeHasComposite ChC;
+ struct CompositeHasComposite *ChCPtr;
+
+ int I;
+ float F;
+ int Array[5];
+
+ // Vars in a reduction must be a scalar or a composite of scalars.
+#pragma acc parallel reduction(&: CoS, I, F)
+ while (1);
+ // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
+ // expected-note@#COS_FIELD{{invalid field is here}}
+#pragma acc parallel reduction(&: ChC)
+ while (1);
+
+ // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, sub-array, or a composite of scalar types; type is 'int[5]'}}
+#pragma acc parallel reduction(&: Array)
+ while (1);
+
+#pragma acc parallel reduction(&: CoS, Array[I], Array[0:I])
+ while (1);
+
+ struct CompositeHasComposite ChCArray[5];
+ // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, sub-array, or a composite of scalar types; sub-array base type is 'struct CompositeHasComposite'}}
+#pragma acc parallel reduction(&: CoS, Array[I], ChCArray[0:I])
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoS.I)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoSPtr->I)
+
+ while (1);
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChC.COS)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChCPtr->COS)
+ while (1);
+
+#pragma acc parallel reduction(&: I) reduction(&:I)
+ while (1);
+
+ struct HasArray { int array[5]; } HA;
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&:HA.array[1:2])
+ while (1);
+}
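A compile-only sketch of the rule the new test exercises — not part of the patch, and assuming a clang built with this change and -fopenacc: scalars, array elements, and sub-arrays are accepted in a 'reduction' clause, while whole arrays and members of composites draw the diagnostics shown above.

    void reduction_sketch(void) {
      int Scalar = 0;
      int Arr[5] = {0};
    #pragma acc parallel reduction(+:Scalar)    // accepted: scalar
      while (1);
    #pragma acc parallel reduction(+:Arr[1])    // accepted: array element
      while (1);
    #pragma acc parallel reduction(+:Arr[0:2])  // accepted: sub-array
      while (1);
      // reduction(+:Arr) or reduction(+:CoS.I) would be rejected with the
      // errors spelled out in the test above.
    }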
diff --git a/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp b/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp
new file mode 100644
index 000000000000..532dbb238716
--- /dev/null
+++ b/clang/test/SemaOpenACC/compute-construct-reduction-clause.cpp
@@ -0,0 +1,175 @@
+// RUN: %clang_cc1 %s -fopenacc -verify
+
+struct CompositeOfScalars {
+ int I;
+ float F;
+ short J;
+ char C;
+ double D;
+ _Complex float CF;
+ _Complex double CD;
+};
+
+struct CompositeHasComposite {
+ int I;
+ float F;
+ short J;
+ char C;
+ double D;
+ _Complex float CF;
+ _Complex double CD;
+ struct CompositeOfScalars COS; // #COS_FIELD
+};
+
+void uses(unsigned Parm) {
+ float Var;
+ int IVar;
+
+#pragma acc parallel reduction(+:Parm)
+ while (1);
+#pragma acc serial reduction(+:Parm)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' clause is not valid on 'kernels' directive}}
+#pragma acc kernels reduction(+:Parm)
+ while (1);
+
+ // On a 'parallel', 'num_gangs' cannot have >1 args. num_gangs not valid on
+ // 'serial', but 'reduction' not valid on 'kernels', other combos cannot be
+ // tested.
+#pragma acc parallel reduction(+:Parm) num_gangs(IVar)
+ while (1);
+#pragma acc parallel num_gangs(IVar) reduction(+:Var)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel reduction(+:Parm) num_gangs(Parm, IVar)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel num_gangs(Parm, IVar) reduction(+:Var)
+ while (1);
+
+#pragma acc parallel reduction(+:Parm) reduction(+:Parm)
+ while (1);
+
+ struct CompositeOfScalars CoS;
+ struct CompositeOfScalars *CoSPtr;
+ struct CompositeHasComposite ChC;
+ struct CompositeHasComposite *ChCPtr;
+
+ int I;
+ float F;
+ int Array[5];
+
+ // Vars in a reduction must be a scalar or a composite of scalars.
+#pragma acc parallel reduction(&: CoS, I, F)
+ while (1);
+ // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
+ // expected-note@#COS_FIELD{{invalid field is here}}
+#pragma acc parallel reduction(&: ChC)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, sub-array, or a composite of scalar types; type is 'int[5]'}}
+#pragma acc parallel reduction(&: Array)
+ while (1);
+
+#pragma acc parallel reduction(&: CoS, Array[I], Array[0:I])
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoS.I)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoSPtr->I)
+
+ while (1);
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChC.COS)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChCPtr->COS)
+ while (1);
+}
+
+template<typename T, typename U, typename V>
+void TemplUses(T Parm, U CoS, V ChC) {
+ T Var;
+ U *CoSPtr;
+ V *ChCPtr;
+
+#pragma acc parallel reduction(+:Parm)
+ while (1);
+#pragma acc serial reduction(+:Parm)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' clause is not valid on 'kernels' directive}}
+#pragma acc kernels reduction(+:Parm)
+ while (1);
+
+ // On a 'parallel', 'num_gangs' cannot have >1 args. num_gangs not valid on
+ // 'serial', but 'reduction' not valid on 'kernels', other combos cannot be
+ // tested.
+#pragma acc parallel reduction(+:Parm) num_gangs(Var)
+ while (1);
+#pragma acc parallel num_gangs(Var) reduction(+:Var)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel reduction(+:Parm) num_gangs(Parm, Var)
+ while (1);
+
+ // expected-error@+2{{OpenACC 'reduction' clause may not appear on a 'parallel' construct with a 'num_gangs' clause with more than 1 argument, have 2}}
+ // expected-note@+1{{previous clause is here}}
+#pragma acc parallel num_gangs(Parm, Var) reduction(+:Var)
+ while (1);
+
+#pragma acc parallel reduction(+:Parm) reduction(+:Parm)
+ while (1);
+
+ int NonDep;
+ int NonDepArray[5];
+ T Array[5];
+
+ // Vars in a reduction must be a scalar or a composite of scalars.
+#pragma acc parallel reduction(&: CoS, Var, Parm)
+ while (1);
+ // expected-error@+2{{OpenACC 'reduction' composite variable must not have non-scalar field}}
+ // expected-note@#COS_FIELD{{invalid field is here}}
+#pragma acc parallel reduction(&: ChC)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, sub-array, or a composite of scalar types; type is 'int[5]'}}
+#pragma acc parallel reduction(&: Array)
+ while (1);
+ // expected-error@+1{{OpenACC 'reduction' variable must be of scalar type, sub-array, or a composite of scalar types; type is 'int[5]'}}
+#pragma acc parallel reduction(&: NonDepArray)
+ while (1);
+
+#pragma acc parallel reduction(&: CoS, Array[Var], Array[0:Var])
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoS.I)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: CoSPtr->I)
+
+ while (1);
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChC.COS)
+ while (1);
+
+ // expected-error@+1{{OpenACC variable is not a valid variable name, sub-array, array element, or composite variable member}}
+#pragma acc parallel reduction(&: ChCPtr->COS)
+ while (1);
+}
+
+void inst() {
+ CompositeOfScalars CoS;
+ CompositeHasComposite ChC;
+ // expected-note@+1{{in instantiation of function template specialization}}
+ TemplUses(5, CoS, ChC);
+}
diff --git a/clang/test/SemaTemplate/cwg2398.cpp b/clang/test/SemaTemplate/cwg2398.cpp
index 31686c4bc980..e3b5e575374d 100644
--- a/clang/test/SemaTemplate/cwg2398.cpp
+++ b/clang/test/SemaTemplate/cwg2398.cpp
@@ -59,6 +59,21 @@ namespace templ {
template struct C<B<int>>;
} // namespace templ
+namespace class_template {
+ template <class T1, class T2 = float> struct A;
+
+ template <class T3> struct B;
+
+ template <template <class T4> class TT1, class T5> struct B<TT1<T5>>;
+ // new-note@-1 {{partial specialization matches}}
+
+ template <class T6, class T7> struct B<A<T6, T7>> {};
+ // new-note@-1 {{partial specialization matches}}
+
+ template struct B<A<int>>;
+ // new-error@-1 {{ambiguous partial specialization}}
+} // namespace class_template
+
namespace type_pack1 {
template<class T2> struct A;
template<template<class ...T3s> class TT1, class T4> struct A<TT1<T4>> ;
diff --git a/clang/test/SemaTemplate/dependent-names.cpp b/clang/test/SemaTemplate/dependent-names.cpp
index 641ec950054f..a7260b194462 100644
--- a/clang/test/SemaTemplate/dependent-names.cpp
+++ b/clang/test/SemaTemplate/dependent-names.cpp
@@ -1,4 +1,4 @@
-// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s
+// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s
typedef double A;
template<typename T> class B {
@@ -334,8 +334,9 @@ int arr[sizeof(Sub)];
namespace PR11421 {
template < unsigned > struct X {
static const unsigned dimension = 3;
- template<unsigned dim=dimension>
- struct Y: Y<dim> { }; // expected-error{{circular inheritance between 'Y<dim>' and 'Y<dim>'}}
+ template<unsigned dim=dimension>
+ struct Y: Y<dim> { }; // expected-error{{base class has incomplete type}}
+ // expected-note@-1{{definition of 'Y<dim>' is not complete until the closing '}'}}
};
typedef X<3> X3;
X3::Y<>::iterator it; // expected-error {{no type named 'iterator' in 'PR11421::X<3>::Y<>'}}
@@ -344,11 +345,12 @@ X3::Y<>::iterator it; // expected-error {{no type named 'iterator' in 'PR11421::
namespace rdar12629723 {
template<class T>
struct X {
- struct C : public C { }; // expected-error{{circular inheritance between 'C' and 'rdar12629723::X::C'}}
+ struct C : public C { }; // expected-error{{base class has incomplete type}}
+ // expected-note@-1{{definition of 'rdar12629723::X::C' is not complete until the closing '}'}}
struct B;
- struct A : public B { // expected-note{{'A' declared here}}
+ struct A : public B {
virtual void foo() { }
};
@@ -357,7 +359,7 @@ namespace rdar12629723 {
};
template<class T>
- struct X<T>::B : public A { // expected-error{{circular inheritance between 'A' and 'rdar12629723::X::B'}}
+ struct X<T>::B : public A {
virtual void foo() { }
};
}
diff --git a/clang/test/SemaTemplate/destructor-template.cpp b/clang/test/SemaTemplate/destructor-template.cpp
index 890188294762..7a3398308bbe 100644
--- a/clang/test/SemaTemplate/destructor-template.cpp
+++ b/clang/test/SemaTemplate/destructor-template.cpp
@@ -1,12 +1,14 @@
// RUN: %clang_cc1 -fsyntax-only -verify -std=c++11 %s
template<typename A> class s0 {
+ template<typename B> class s1;
+};
- template<typename B> class s1 : public s0<A> {
- ~s1() {}
- s0<A> ms0;
- };
-
+template<typename A>
+template<typename B>
+class s0<A>::s1 : s0<A> {
+ ~s1() {}
+ s0<A> ms0;
};
struct Incomplete;
@@ -28,7 +30,7 @@ namespace PR6152 {
y->template Y<T>::~Y<T>();
y->~Y();
}
-
+
template struct X<int>;
}
diff --git a/clang/test/SemaTemplate/ms-function-specialization-class-scope.cpp b/clang/test/SemaTemplate/ms-function-specialization-class-scope.cpp
index c49d2cb2422f..e1f3ab37ad94 100644
--- a/clang/test/SemaTemplate/ms-function-specialization-class-scope.cpp
+++ b/clang/test/SemaTemplate/ms-function-specialization-class-scope.cpp
@@ -464,6 +464,32 @@ namespace UsesThis {
g1(x1);
g1(y0);
g1(y1);
+
+ T::f0(0);
+ T::f0(z);
+ T::f0(x0);
+ T::f0(x1);
+ T::f0(y0);
+ T::f0(y1);
+ T::g0(0);
+ T::g0(z);
+ T::g0(x0);
+ T::g0(x1);
+ T::g0(y0);
+ T::g0(y1);
+
+ E::f1(0);
+ E::f1(z);
+ E::f1(x0);
+ E::f1(x1);
+ E::f1(y0);
+ E::f1(y1);
+ E::g1(0);
+ E::g1(z);
+ E::g1(x0);
+ E::g1(x1);
+ E::g1(y0);
+ E::g1(y1);
}
template<>
@@ -519,6 +545,32 @@ namespace UsesThis {
g1(x1); // expected-error {{invalid use of member 'x1' in static member function}}
g1(y0);
g1(y1);
+
+ T::f0(0); // expected-error {{call to non-static member function without an object argument}}
+ T::f0(z); // expected-error {{call to non-static member function without an object argument}}
+ T::f0(x0); // expected-error {{call to non-static member function without an object argument}}
+ T::f0(x1); // expected-error {{call to non-static member function without an object argument}}
+ T::f0(y0); // expected-error {{call to non-static member function without an object argument}}
+ T::f0(y1); // expected-error {{call to non-static member function without an object argument}}
+ T::g0(0);
+ T::g0(z);
+ T::g0(x0); // expected-error {{invalid use of member 'x0' in static member function}}
+ T::g0(x1); // expected-error {{invalid use of member 'x1' in static member function}}
+ T::g0(y0);
+ T::g0(y1);
+
+ E::f1(0); // expected-error {{call to non-static member function without an object argument}}
+ E::f1(z); // expected-error {{call to non-static member function without an object argument}}
+ E::f1(x0); // expected-error {{call to non-static member function without an object argument}}
+ E::f1(x1); // expected-error {{call to non-static member function without an object argument}}
+ E::f1(y0); // expected-error {{call to non-static member function without an object argument}}
+ E::f1(y1); // expected-error {{call to non-static member function without an object argument}}
+ E::g1(0);
+ E::g1(z);
+ E::g1(x0); // expected-error {{invalid use of member 'x0' in static member function}}
+ E::g1(x1); // expected-error {{invalid use of member 'x1' in static member function}}
+ E::g1(y0);
+ E::g1(y1);
}
};
diff --git a/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp b/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
index 534a5dc9ddc1..547e5945ac6b 100644
--- a/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
+++ b/clang/test/SemaTemplate/ms-lookup-template-base-classes.cpp
@@ -102,7 +102,7 @@ public:
};
template class B<int>; // expected-note {{requested here}}
-}
+}
@@ -111,8 +111,8 @@ namespace lookup_dependent_base_class_default_argument {
template<class T>
class A {
public:
- static int f1(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
- int f2(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
+ static int f1(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
+ int f2(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
};
template<class T>
@@ -137,7 +137,7 @@ namespace lookup_dependent_base_class_friend {
template <class T>
class B {
public:
- static void g(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
+ static void g(); // expected-note {{must qualify identifier to find this declaration in dependent base class}}
};
template <class T>
@@ -228,7 +228,7 @@ template <typename T> struct C : T {
int *bar() { return &b; } // expected-error {{no member named 'b' in 'PR16014::C<A>'}} expected-warning {{lookup into dependent bases}}
int baz() { return T::b; } // expected-error {{no member named 'b' in 'PR16014::A'}}
int T::*qux() { return &T::b; } // expected-error {{no member named 'b' in 'PR16014::A'}}
- int T::*fuz() { return &U::a; } // expected-error {{use of undeclared identifier 'U'}} \
+ int T::*fuz() { return &U::a; } // expected-error {{no member named 'U' in 'PR16014::C<A>'}} \
// expected-warning {{unqualified lookup into dependent bases of class template 'C'}}
};
@@ -258,7 +258,7 @@ struct A : T {
::UndefClass::undef(); // expected-error {{no member named 'UndefClass' in the global namespace}}
}
void baz() {
- B::qux(); // expected-error {{use of undeclared identifier 'B'}} \
+ B::qux(); // expected-error {{no member named 'B' in 'PR19233::A<D>'}} \
// expected-warning {{unqualified lookup into dependent bases of class template 'A'}}
}
};
diff --git a/clang/test/SemaTemplate/typo-dependent-name.cpp b/clang/test/SemaTemplate/typo-dependent-name.cpp
index fb61b03e5010..5bd924241480 100644
--- a/clang/test/SemaTemplate/typo-dependent-name.cpp
+++ b/clang/test/SemaTemplate/typo-dependent-name.cpp
@@ -31,8 +31,7 @@ struct Y {
static int z;
template<int U>
- struct Inner : Y { // expected-note {{declared here}}
- };
+ struct Inner; // expected-note {{declared here}}
bool f(T other) {
// We can determine that 'inner' does not exist at parse time, so can
@@ -41,5 +40,9 @@ struct Y {
}
};
+template<typename T>
+template<int U>
+struct Y<T>::Inner : Y { };
+
struct Q { constexpr operator int() { return 0; } };
void use_y(Y<Q> x) { x.f(Q()); }
diff --git a/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp b/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp
index 7c9071be0918..7338872dbf32 100644
--- a/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp
+++ b/clang/tools/amdgpu-arch/AMDGPUArchByHIP.cpp
@@ -1,4 +1,4 @@
-//===- AMDGPUArch.cpp - list AMDGPU installed ----------*- C++ -*---------===//
+//===- AMDGPUArchByHIP.cpp - list AMDGPU installed ----------*- C++ -*-----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp b/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp
index f82a4890f465..432f2c414ed2 100644
--- a/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp
+++ b/clang/tools/amdgpu-arch/AMDGPUArchByHSA.cpp
@@ -1,4 +1,4 @@
-//===- AMDGPUArchLinux.cpp - list AMDGPU installed ------*- C++ -*---------===//
+//===- AMDGPUArchByHSA.cpp - list AMDGPU installed ------*- C++ -*---------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
diff --git a/clang/tools/clang-format/ClangFormat.cpp b/clang/tools/clang-format/ClangFormat.cpp
index 3fa5f81a3576..50dd073c4125 100644
--- a/clang/tools/clang-format/ClangFormat.cpp
+++ b/clang/tools/clang-format/ClangFormat.cpp
@@ -352,7 +352,7 @@ emitReplacementWarnings(const Replacements &Replaces, StringRef AssumedFileName,
unsigned Errors = 0;
if (WarnFormat && !NoWarnFormat) {
- llvm::SourceMgr Mgr;
+ SourceMgr Mgr;
const char *StartBuf = Code->getBufferStart();
Mgr.AddNewSourceBuffer(
@@ -447,11 +447,11 @@ static bool format(StringRef FileName, bool ErrorOnIncompleteFormat = false) {
return true;
}
- llvm::Expected<FormatStyle> FormatStyle =
+ Expected<FormatStyle> FormatStyle =
getStyle(Style, AssumedFileName, FallbackStyle, Code->getBuffer(),
nullptr, WNoErrorList.isSet(WNoError::Unknown));
if (!FormatStyle) {
- llvm::errs() << llvm::toString(FormatStyle.takeError()) << "\n";
+ llvm::errs() << toString(FormatStyle.takeError()) << "\n";
return true;
}
@@ -497,7 +497,7 @@ static bool format(StringRef FileName, bool ErrorOnIncompleteFormat = false) {
auto ChangedCode = tooling::applyAllReplacements(Code->getBuffer(), Replaces);
if (!ChangedCode) {
- llvm::errs() << llvm::toString(ChangedCode.takeError()) << "\n";
+ llvm::errs() << toString(ChangedCode.takeError()) << "\n";
return true;
}
// Get new affected ranges after sorting `#includes`.
@@ -567,14 +567,12 @@ static int dumpConfig() {
}
Code = std::move(CodeOrErr.get());
}
- llvm::Expected<clang::format::FormatStyle> FormatStyle =
- clang::format::getStyle(Style,
- FileNames.empty() || FileNames[0] == "-"
- ? AssumeFileName
- : FileNames[0],
- FallbackStyle, Code ? Code->getBuffer() : "");
+ Expected<clang::format::FormatStyle> FormatStyle = clang::format::getStyle(
+ Style,
+ FileNames.empty() || FileNames[0] == "-" ? AssumeFileName : FileNames[0],
+ FallbackStyle, Code ? Code->getBuffer() : "");
if (!FormatStyle) {
- llvm::errs() << llvm::toString(FormatStyle.takeError()) << "\n";
+ llvm::errs() << toString(FormatStyle.takeError()) << "\n";
return 1;
}
std::string Config = clang::format::configurationAsText(*FormatStyle);
@@ -671,7 +669,7 @@ static bool isIgnored(StringRef FilePath) {
}
int main(int argc, const char **argv) {
- llvm::InitLLVM X(argc, argv);
+ InitLLVM X(argc, argv);
cl::HideUnrelatedOptions(ClangFormatCategory);
diff --git a/clang/tools/clang-installapi/InstallAPIOpts.td b/clang/tools/clang-installapi/InstallAPIOpts.td
index a95a7a80a9d2..fc0fbe929c88 100644
--- a/clang/tools/clang-installapi/InstallAPIOpts.td
+++ b/clang/tools/clang-installapi/InstallAPIOpts.td
@@ -99,6 +99,9 @@ def X__ : Joined<["-"], "X">,
HelpText<"Pass <arg> to run unique clang invocation identified as <label>">,
MetaVarName<"<label> <arg>">;
+def option_list : Separate<["-"], "optionlist">, MetaVarName<"<path>">,
+ HelpText<"Specifies the <path> to a file that contains X<label> arguments to parse.">;
+
//
/// Overidden clang options for different behavior.
//
diff --git a/clang/tools/clang-installapi/Options.cpp b/clang/tools/clang-installapi/Options.cpp
index 53340da704fc..95d28b7b040d 100644
--- a/clang/tools/clang-installapi/Options.cpp
+++ b/clang/tools/clang-installapi/Options.cpp
@@ -13,6 +13,7 @@
#include "clang/InstallAPI/HeaderFile.h"
#include "clang/InstallAPI/InstallAPIDiagnostic.h"
#include "llvm/BinaryFormat/Magic.h"
+#include "llvm/Support/JSON.h"
#include "llvm/Support/Program.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/TextAPI/DylibReader.h"
@@ -82,6 +83,47 @@ static llvm::opt::OptTable *createDriverOptTable() {
return new DriverOptTable();
}
+/// Parse JSON input into argument list.
+///
+/* Expected input format.
+ * { "label" : ["-ClangArg1", "-ClangArg2"] }
+ */
+///
+/// Input is interpreted as "-Xlabel ClangArg1 -XLabel ClangArg2".
+static Expected<llvm::opt::InputArgList>
+getArgListFromJSON(const StringRef Input, llvm::opt::OptTable *Table,
+ std::vector<std::string> &Storage) {
+ using namespace json;
+ Expected<Value> ValOrErr = json::parse(Input);
+ if (!ValOrErr)
+ return ValOrErr.takeError();
+
+ const Object *Root = ValOrErr->getAsObject();
+ if (!Root)
+ return llvm::opt::InputArgList();
+
+ for (const auto &KV : *Root) {
+ const Array *ArgList = KV.getSecond().getAsArray();
+ std::string Label = "-X" + KV.getFirst().str();
+ if (!ArgList)
+ return make_error<TextAPIError>(TextAPIErrorCode::InvalidInputFormat);
+ for (auto Arg : *ArgList) {
+ std::optional<StringRef> ArgStr = Arg.getAsString();
+ if (!ArgStr)
+ return make_error<TextAPIError>(TextAPIErrorCode::InvalidInputFormat);
+ Storage.emplace_back(Label);
+ Storage.emplace_back(*ArgStr);
+ }
+ }
+
+ std::vector<const char *> CArgs(Storage.size());
+ llvm::for_each(Storage,
+ [&CArgs](StringRef Str) { CArgs.emplace_back(Str.data()); });
+
+ unsigned MissingArgIndex, MissingArgCount;
+ return Table->ParseArgs(CArgs, MissingArgIndex, MissingArgCount);
+}
+
bool Options::processDriverOptions(InputArgList &Args) {
// Handle inputs.
llvm::append_range(DriverOpts.FileLists,
@@ -348,6 +390,31 @@ bool Options::processXarchOption(InputArgList &Args, arg_iterator Curr) {
return true;
}
+bool Options::processOptionList(InputArgList &Args,
+ llvm::opt::OptTable *Table) {
+ Arg *A = Args.getLastArg(OPT_option_list);
+ if (!A)
+ return true;
+
+ const StringRef Path = A->getValue(0);
+ auto InputOrErr = FM->getBufferForFile(Path);
+ if (auto Err = InputOrErr.getError()) {
+ Diags->Report(diag::err_cannot_open_file) << Path << Err.message();
+ return false;
+ }
+ // Backing storage referenced for argument processing.
+ std::vector<std::string> Storage;
+ auto ArgsOrErr =
+ getArgListFromJSON((*InputOrErr)->getBuffer(), Table, Storage);
+
+ if (auto Err = ArgsOrErr.takeError()) {
+ Diags->Report(diag::err_cannot_read_input_list)
+ << "option" << Path << toString(std::move(Err));
+ return false;
+ }
+ return processInstallAPIXOptions(*ArgsOrErr);
+}
+
bool Options::processLinkerOptions(InputArgList &Args) {
// Handle required arguments.
if (const Arg *A = Args.getLastArg(drv::OPT_install__name))
@@ -510,6 +577,9 @@ Options::processAndFilterOutInstallAPIOptions(ArrayRef<const char *> Args) {
if (!processInstallAPIXOptions(ParsedArgs))
return {};
+ if (!processOptionList(ParsedArgs, Table.get()))
+ return {};
+
DriverOpts.Demangle = ParsedArgs.hasArg(OPT_demangle);
if (auto *A = ParsedArgs.getLastArg(OPT_filetype)) {
@@ -818,7 +888,7 @@ InstallAPIContext Options::createContext() {
Expected<AliasMap> Result = parseAliasList(Buffer.get());
if (!Result) {
Diags->Report(diag::err_cannot_read_input_list)
- << /*IsFileList=*/false << ListPath << toString(Result.takeError());
+ << "symbol alias" << ListPath << toString(Result.takeError());
return Ctx;
}
Aliases.insert(Result.get().begin(), Result.get().end());
@@ -839,7 +909,7 @@ InstallAPIContext Options::createContext() {
if (auto Err = FileListReader::loadHeaders(std::move(Buffer.get()),
Ctx.InputHeaders, FM)) {
Diags->Report(diag::err_cannot_read_input_list)
- << /*IsFileList=*/true << ListPath << std::move(Err);
+ << "header file" << ListPath << std::move(Err);
return Ctx;
}
}
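A usage sketch for the new -optionlist flag (the file name and label below are hypothetical, not taken from the patch): the file holds a JSON object in the format documented in getArgListFromJSON above, for example

    { "label" : ["-ClangArg1", "-ClangArg2"] }

and is passed as clang-installapi ... -optionlist opts.json. Per the comment above, such an entry is interpreted as "-Xlabel ClangArg1 -XLabel ClangArg2" and handed to processInstallAPIXOptions(); an unreadable file is reported through err_cannot_open_file, and malformed JSON through err_cannot_read_input_list.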
diff --git a/clang/tools/clang-installapi/Options.h b/clang/tools/clang-installapi/Options.h
index fd1e10065d10..b37f91efbda7 100644
--- a/clang/tools/clang-installapi/Options.h
+++ b/clang/tools/clang-installapi/Options.h
@@ -161,6 +161,8 @@ private:
bool processXarchOption(llvm::opt::InputArgList &Args, arg_iterator Curr);
bool processXplatformOption(llvm::opt::InputArgList &Args, arg_iterator Curr);
bool processXprojectOption(llvm::opt::InputArgList &Args, arg_iterator Curr);
+ bool processOptionList(llvm::opt::InputArgList &Args,
+ llvm::opt::OptTable *Table);
public:
/// The various options grouped together.
diff --git a/clang/tools/libclang/CIndex.cpp b/clang/tools/libclang/CIndex.cpp
index bfbdb5be9ff2..d0d654568d84 100644
--- a/clang/tools/libclang/CIndex.cpp
+++ b/clang/tools/libclang/CIndex.cpp
@@ -776,10 +776,9 @@ bool CursorVisitor::VisitTemplateTypeParmDecl(TemplateTypeParmDecl *D) {
}
// Visit the default argument.
- if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited())
- if (TypeSourceInfo *DefArg = D->getDefaultArgumentInfo())
- if (Visit(DefArg->getTypeLoc()))
- return true;
+ if (D->hasDefaultArgument() && !D->defaultArgumentWasInherited() &&
+ VisitTemplateArgumentLoc(D->getDefaultArgument()))
+ return true;
return false;
}
@@ -2855,6 +2854,10 @@ void OpenACCClauseEnqueue::VisitWaitClause(const OpenACCWaitClause &C) {
}
void OpenACCClauseEnqueue::VisitDeviceTypeClause(
const OpenACCDeviceTypeClause &C) {}
+void OpenACCClauseEnqueue::VisitReductionClause(
+ const OpenACCReductionClause &C) {
+ VisitVarList(C);
+}
} // namespace
void EnqueueVisitor::EnqueueChildren(const OpenACCClause *C) {
diff --git a/clang/unittests/AST/ASTImporterTest.cpp b/clang/unittests/AST/ASTImporterTest.cpp
index 4ee64de697d3..ba630002c055 100644
--- a/clang/unittests/AST/ASTImporterTest.cpp
+++ b/clang/unittests/AST/ASTImporterTest.cpp
@@ -1188,7 +1188,7 @@ TEST_P(ASTImporterOptionSpecificTestBase, TemplateTypeParmDeclDefaultArg) {
FromTU, templateTypeParmDecl(hasName("T")));
TemplateTypeParmDecl *To = Import(From, Lang_CXX03);
ASSERT_TRUE(To->hasDefaultArgument());
- QualType ToArg = To->getDefaultArgument();
+ QualType ToArg = To->getDefaultArgument().getArgument().getAsType();
ASSERT_EQ(ToArg, QualType(To->getASTContext().IntTy));
}
diff --git a/clang/unittests/AST/Interp/Descriptor.cpp b/clang/unittests/AST/Interp/Descriptor.cpp
index 053d579ea391..3157b4d401f9 100644
--- a/clang/unittests/AST/Interp/Descriptor.cpp
+++ b/clang/unittests/AST/Interp/Descriptor.cpp
@@ -22,9 +22,10 @@ TEST(Descriptor, Primitives) {
" char s[4];\n"
" A a[3];\n"
" short l[3][3];\n"
+ " int EmptyA[0];\n"
"};\n"
"constexpr S d = {0.0, \"foo\", {{true, false}, {false, true}, {false, false}},\n"
- " {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}};\n";
+ " {{1, 2, 3}, {4, 5, 6}, {7, 8, 9}}, {}};\n";
auto AST = tooling::buildASTFromCodeWithArgs(
Code, {"-fexperimental-new-constant-interpreter"});
@@ -64,7 +65,7 @@ TEST(Descriptor, Primitives) {
// Test the Record for the struct S.
const Record *SRecord = GlobalDesc->ElemRecord;
ASSERT_TRUE(SRecord);
- ASSERT_TRUE(SRecord->getNumFields() == 4);
+ ASSERT_TRUE(SRecord->getNumFields() == 5);
ASSERT_TRUE(SRecord->getNumBases() == 0);
ASSERT_FALSE(SRecord->getDestructor());
@@ -113,6 +114,16 @@ TEST(Descriptor, Primitives) {
ASSERT_TRUE(F4->Desc->getElemSize() > 0);
ASSERT_TRUE(F4->Desc->ElemDesc->isPrimitiveArray());
+ // Fifth field. Zero-size array.
+ const Record::Field *F5 = SRecord->getField(4u);
+ ASSERT_TRUE(F5);
+ ASSERT_FALSE(F5->isBitField());
+ ASSERT_TRUE(F5->Desc->isArray());
+ ASSERT_FALSE(F5->Desc->isCompositeArray());
+ ASSERT_TRUE(F5->Desc->isPrimitiveArray());
+ ASSERT_FALSE(F5->Desc->isPrimitive());
+ ASSERT_EQ(F5->Desc->getNumElems(), 0u);
+
// Check pointer stuff.
// Global variables have an inline descriptor.
ASSERT_TRUE(GlobalPtr.isRoot());
@@ -382,4 +393,13 @@ TEST(Descriptor, Primitives) {
ASSERT_EQ(PE3.getArray(), NE3);
ASSERT_EQ(PE3.getIndex(), 2u);
}
+
+ // Zero-size array.
+ {
+ const Pointer &PF5 = GlobalPtr.atField(F5->Offset);
+
+ ASSERT_TRUE(PF5.isZeroSizeArray());
+ ASSERT_FALSE(PF5.isOnePastEnd());
+ ASSERT_FALSE(PF5.isElementPastEnd());
+ }
}
diff --git a/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp b/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
index 65df513d2713..2e42b8580895 100644
--- a/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
+++ b/clang/unittests/ASTMatchers/ASTMatchersNodeTest.cpp
@@ -614,8 +614,10 @@ TEST_P(ASTMatchersTest, MemberExpr_MatchesVariable) {
EXPECT_TRUE(matches("template <class T>"
"class X : T { void f() { this->T::v; } };",
cxxDependentScopeMemberExpr()));
- EXPECT_TRUE(matches("template <class T> class X : T { void f() { T::v; } };",
- cxxDependentScopeMemberExpr()));
+ // FIXME: Add a matcher for DependentScopeDeclRefExpr.
+ EXPECT_TRUE(
+ notMatches("template <class T> class X : T { void f() { T::v; } };",
+ cxxDependentScopeMemberExpr()));
EXPECT_TRUE(matches("template <class T> void x() { T t; t.v; }",
cxxDependentScopeMemberExpr()));
}
diff --git a/clang/unittests/Format/CleanupTest.cpp b/clang/unittests/Format/CleanupTest.cpp
index dc149b502bc5..a3801106e1ce 100644
--- a/clang/unittests/Format/CleanupTest.cpp
+++ b/clang/unittests/Format/CleanupTest.cpp
@@ -20,10 +20,9 @@ namespace clang {
namespace format {
namespace {
-class CleanupTest : public ::testing::Test {
+class CleanupTest : public testing::Test {
protected:
- std::string cleanup(llvm::StringRef Code,
- const std::vector<tooling::Range> &Ranges,
+ std::string cleanup(StringRef Code, const std::vector<tooling::Range> &Ranges,
const FormatStyle &Style = getLLVMStyle()) {
tooling::Replacements Replaces = format::cleanup(Style, Code, Ranges);
@@ -33,8 +32,7 @@ protected:
}
// Returns code after cleanup around \p Offsets.
- std::string cleanupAroundOffsets(llvm::ArrayRef<unsigned> Offsets,
- llvm::StringRef Code,
+ std::string cleanupAroundOffsets(ArrayRef<unsigned> Offsets, StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
std::vector<tooling::Range> Ranges;
for (auto Offset : Offsets)
@@ -332,7 +330,7 @@ protected:
const tooling::Replacements &Replaces) {
auto CleanReplaces = cleanupAroundReplacements(Code, Replaces, Style);
EXPECT_TRUE(static_cast<bool>(CleanReplaces))
- << llvm::toString(CleanReplaces.takeError()) << "\n";
+ << toString(CleanReplaces.takeError()) << "\n";
auto Result = applyAllReplacements(Code, *CleanReplaces);
EXPECT_TRUE(static_cast<bool>(Result));
return *Result;
@@ -342,10 +340,10 @@ protected:
const tooling::Replacements &Replaces) {
auto CleanReplaces = cleanupAroundReplacements(Code, Replaces, Style);
EXPECT_TRUE(static_cast<bool>(CleanReplaces))
- << llvm::toString(CleanReplaces.takeError()) << "\n";
+ << toString(CleanReplaces.takeError()) << "\n";
auto FormattedReplaces = formatReplacements(Code, *CleanReplaces, Style);
EXPECT_TRUE(static_cast<bool>(FormattedReplaces))
- << llvm::toString(FormattedReplaces.takeError()) << "\n";
+ << toString(FormattedReplaces.takeError()) << "\n";
auto Result = applyAllReplacements(Code, *FormattedReplaces);
EXPECT_TRUE(static_cast<bool>(Result));
return *Result;
diff --git a/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp b/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp
index 7a120935cfa9..b26b9f4f4ff6 100644
--- a/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp
+++ b/clang/unittests/Format/DefinitionBlockSeparatorTest.cpp
@@ -18,10 +18,10 @@ namespace clang {
namespace format {
namespace {
-class DefinitionBlockSeparatorTest : public ::testing::Test {
+class DefinitionBlockSeparatorTest : public testing::Test {
protected:
static std::string
- separateDefinitionBlocks(llvm::StringRef Code,
+ separateDefinitionBlocks(StringRef Code,
const std::vector<tooling::Range> &Ranges,
const FormatStyle &Style = getLLVMStyle()) {
LLVM_DEBUG(llvm::errs() << "---\n");
@@ -34,18 +34,17 @@ protected:
}
static std::string
- separateDefinitionBlocks(llvm::StringRef Code,
+ separateDefinitionBlocks(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return separateDefinitionBlocks(
Code,
/*Ranges=*/{1, tooling::Range(0, Code.size())}, Style);
}
- static void _verifyFormat(const char *File, int Line, llvm::StringRef Code,
+ static void _verifyFormat(const char *File, int Line, StringRef Code,
const FormatStyle &Style = getLLVMStyle(),
- llvm::StringRef ExpectedCode = "",
- bool Inverse = true) {
- ::testing::ScopedTrace t(File, Line, ::testing::Message() << Code.str());
+ StringRef ExpectedCode = "", bool Inverse = true) {
+ testing::ScopedTrace t(File, Line, testing::Message() << Code.str());
bool HasOriginalCode = true;
if (ExpectedCode == "") {
ExpectedCode = Code;
@@ -70,7 +69,7 @@ protected:
EXPECT_EQ(ExpectedCode, Result) << "Test failed. Formatted:\n" << Result;
}
- static std::string removeEmptyLines(llvm::StringRef Code) {
+ static std::string removeEmptyLines(StringRef Code) {
std::string Result = "";
for (auto Char : Code.str()) {
if (Result.size()) {
@@ -165,13 +164,13 @@ TEST_F(DefinitionBlockSeparatorTest, Basic) {
TEST_F(DefinitionBlockSeparatorTest, FormatConflict) {
FormatStyle Style = getLLVMStyle();
Style.SeparateDefinitionBlocks = FormatStyle::SDS_Always;
- llvm::StringRef Code = "class Test {\n"
- "public:\n"
- " static void foo() {\n"
- " int t;\n"
- " return 1;\n"
- " }\n"
- "};";
+ StringRef Code = "class Test {\n"
+ "public:\n"
+ " static void foo() {\n"
+ " int t;\n"
+ " return 1;\n"
+ " }\n"
+ "};";
std::vector<tooling::Range> Ranges = {1, tooling::Range(0, Code.size())};
EXPECT_EQ(reformat(Style, Code, Ranges, "<stdin>").size(), 0u);
}
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index 6f57f10e12e8..a9df994189f0 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -3124,6 +3124,7 @@ TEST_F(FormatTest, FormatsLabels) {
" g();\n"
" }\n"
"}");
+
FormatStyle Style = getLLVMStyle();
Style.IndentGotoLabels = false;
verifyFormat("void f() {\n"
@@ -3163,6 +3164,13 @@ TEST_F(FormatTest, FormatsLabels) {
" }\n"
"}",
Style);
+
+ Style.ColumnLimit = 15;
+ verifyFormat("#define FOO \\\n"
+ "label: \\\n"
+ " break;",
+ Style);
+
// The opening brace may either be on the same unwrapped line as the colon or
// on a separate one. The formatter should recognize both.
Style = getLLVMStyle();
@@ -17332,12 +17340,14 @@ TEST_F(FormatTest, ConfigurableSpaceBeforeAssignmentOperators) {
verifyFormat("int a = 5;");
verifyFormat("a += 42;");
verifyFormat("a or_eq 8;");
+ verifyFormat("xor = foo;");
FormatStyle Spaces = getLLVMStyle();
Spaces.SpaceBeforeAssignmentOperators = false;
verifyFormat("int a= 5;", Spaces);
verifyFormat("a+= 42;", Spaces);
verifyFormat("a or_eq 8;", Spaces);
+ verifyFormat("xor= foo;", Spaces);
}
TEST_F(FormatTest, ConfigurableSpaceBeforeColon) {
diff --git a/clang/unittests/Format/FormatTestBase.h b/clang/unittests/Format/FormatTestBase.h
index eaadb1c9f83e..33110ca5d9ed 100644
--- a/clang/unittests/Format/FormatTestBase.h
+++ b/clang/unittests/Format/FormatTestBase.h
@@ -25,17 +25,17 @@ namespace test {
#define DEBUG_TYPE "format-test-base"
-class FormatTestBase : public ::testing::Test {
+class FormatTestBase : public testing::Test {
protected:
enum StatusCheck { SC_ExpectComplete, SC_ExpectIncomplete, SC_DoNotCheck };
virtual FormatStyle getDefaultStyle() const { return getLLVMStyle(); }
- virtual std::string messUp(llvm::StringRef Code) const {
+ virtual std::string messUp(StringRef Code) const {
return test::messUp(Code);
}
- std::string format(llvm::StringRef Code,
+ std::string format(StringRef Code,
const std::optional<FormatStyle> &Style = {},
StatusCheck CheckComplete = SC_ExpectComplete,
const std::vector<tooling::Range> &Ranges = {}) {
@@ -80,11 +80,11 @@ protected:
return Style;
}
- bool _verifyFormat(const char *File, int Line, llvm::StringRef Expected,
- llvm::StringRef Code,
+ bool _verifyFormat(const char *File, int Line, StringRef Expected,
+ StringRef Code,
const std::optional<FormatStyle> &Style = {},
const std::vector<tooling::Range> &Ranges = {}) {
- testing::ScopedTrace t(File, Line, ::testing::Message() << Code.str());
+ testing::ScopedTrace t(File, Line, testing::Message() << Code.str());
const auto ExpectedCode{Expected.str()};
auto FormattedCode{format(Code, Style, SC_ExpectComplete, Ranges)};
EXPECT_EQ(ExpectedCode, FormattedCode);
@@ -111,7 +111,7 @@ protected:
return true;
}
- void _verifyFormat(const char *File, int Line, llvm::StringRef Code,
+ void _verifyFormat(const char *File, int Line, StringRef Code,
const std::optional<FormatStyle> &Style = {}) {
if (!_verifyFormat(File, Line, Code, Code, Style))
return;
@@ -119,27 +119,26 @@ protected:
_verifyFormat(File, Line, Code, MessedUpCode, Style);
}
- void _verifyIncompleteFormat(const char *File, int Line, llvm::StringRef Code,
+ void _verifyIncompleteFormat(const char *File, int Line, StringRef Code,
const std::optional<FormatStyle> &Style = {}) {
- testing::ScopedTrace t(File, Line, ::testing::Message() << Code.str());
+ testing::ScopedTrace t(File, Line, testing::Message() << Code.str());
EXPECT_EQ(Code.str(), format(messUp(Code), Style, SC_ExpectIncomplete));
}
void
- _verifyIndependentOfContext(const char *File, int Line, llvm::StringRef Text,
+ _verifyIndependentOfContext(const char *File, int Line, StringRef Text,
const std::optional<FormatStyle> &Style = {}) {
_verifyFormat(File, Line, Text, Style);
- _verifyFormat(File, Line, llvm::Twine("void f() { " + Text + " }").str(),
- Style);
+ _verifyFormat(File, Line, Twine("void f() { " + Text + " }").str(), Style);
}
- void _verifyNoChange(const char *File, int Line, llvm::StringRef Code,
+ void _verifyNoChange(const char *File, int Line, StringRef Code,
const std::optional<FormatStyle> &Style = {}) {
_verifyFormat(File, Line, Code, Code, Style);
}
/// \brief Verify that clang-format does not crash on the given input.
- void verifyNoCrash(llvm::StringRef Code,
+ void verifyNoCrash(StringRef Code,
const std::optional<FormatStyle> &Style = {}) {
format(Code, Style, SC_DoNotCheck);
}
diff --git a/clang/unittests/Format/FormatTestCSharp.cpp b/clang/unittests/Format/FormatTestCSharp.cpp
index de261c094830..7166e4ec4de3 100644
--- a/clang/unittests/Format/FormatTestCSharp.cpp
+++ b/clang/unittests/Format/FormatTestCSharp.cpp
@@ -21,8 +21,8 @@ protected:
return getMicrosoftStyle(FormatStyle::LK_CSharp);
}
- static std::string format(llvm::StringRef Code, unsigned Offset,
- unsigned Length, const FormatStyle &Style) {
+ static std::string format(StringRef Code, unsigned Offset, unsigned Length,
+ const FormatStyle &Style) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
std::vector<tooling::Range> Ranges(1, tooling::Range(Offset, Length));
@@ -34,7 +34,7 @@ protected:
}
static std::string
- format(llvm::StringRef Code,
+ format(StringRef Code,
const FormatStyle &Style = getMicrosoftStyle(FormatStyle::LK_CSharp)) {
return format(Code, 0, Code.size(), Style);
}
diff --git a/clang/unittests/Format/FormatTestJS.cpp b/clang/unittests/Format/FormatTestJS.cpp
index 3aded8f3726d..b910ce620de7 100644
--- a/clang/unittests/Format/FormatTestJS.cpp
+++ b/clang/unittests/Format/FormatTestJS.cpp
@@ -16,10 +16,10 @@
namespace clang {
namespace format {
-class FormatTestJS : public ::testing::Test {
+class FormatTestJS : public testing::Test {
protected:
- static std::string format(llvm::StringRef Code, unsigned Offset,
- unsigned Length, const FormatStyle &Style) {
+ static std::string format(StringRef Code, unsigned Offset, unsigned Length,
+ const FormatStyle &Style) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
std::vector<tooling::Range> Ranges(1, tooling::Range(Offset, Length));
@@ -34,7 +34,7 @@ protected:
}
static std::string format(
- llvm::StringRef Code,
+ StringRef Code,
const FormatStyle &Style = getGoogleStyle(FormatStyle::LK_JavaScript)) {
return format(Code, 0, Code.size(), Style);
}
@@ -46,7 +46,7 @@ protected:
}
static void verifyFormat(
- llvm::StringRef Code,
+ StringRef Code,
const FormatStyle &Style = getGoogleStyle(FormatStyle::LK_JavaScript)) {
EXPECT_EQ(Code.str(), format(Code, Style)) << "Expected code is not stable";
std::string Result = format(test::messUp(Code), Style);
@@ -54,7 +54,7 @@ protected:
}
static void verifyFormat(
- llvm::StringRef Expected, llvm::StringRef Code,
+ StringRef Expected, StringRef Code,
const FormatStyle &Style = getGoogleStyle(FormatStyle::LK_JavaScript)) {
EXPECT_EQ(Expected.str(), format(Expected, Style))
<< "Expected code is not stable";
diff --git a/clang/unittests/Format/FormatTestJson.cpp b/clang/unittests/Format/FormatTestJson.cpp
index 3254802dc0d6..60e9f17855f7 100644
--- a/clang/unittests/Format/FormatTestJson.cpp
+++ b/clang/unittests/Format/FormatTestJson.cpp
@@ -16,10 +16,10 @@
namespace clang {
namespace format {
-class FormatTestJson : public ::testing::Test {
+class FormatTestJson : public testing::Test {
protected:
- static std::string format(llvm::StringRef Code, unsigned Offset,
- unsigned Length, const FormatStyle &Style) {
+ static std::string format(StringRef Code, unsigned Offset, unsigned Length,
+ const FormatStyle &Style) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
@@ -47,7 +47,7 @@ protected:
}
static std::string
- format(llvm::StringRef Code,
+ format(StringRef Code,
const FormatStyle &Style = getLLVMStyle(FormatStyle::LK_Json)) {
return format(Code, 0, Code.size(), Style);
}
@@ -58,13 +58,12 @@ protected:
return Style;
}
- static void verifyFormatStable(llvm::StringRef Code,
- const FormatStyle &Style) {
+ static void verifyFormatStable(StringRef Code, const FormatStyle &Style) {
EXPECT_EQ(Code.str(), format(Code, Style)) << "Expected code is not stable";
}
static void
- verifyFormat(llvm::StringRef Code,
+ verifyFormat(StringRef Code,
const FormatStyle &Style = getLLVMStyle(FormatStyle::LK_Json)) {
verifyFormatStable(Code, Style);
EXPECT_EQ(Code.str(), format(test::messUp(Code), Style));
diff --git a/clang/unittests/Format/FormatTestProto.cpp b/clang/unittests/Format/FormatTestProto.cpp
index 4a2d2d68248d..5adb532ae4a4 100644
--- a/clang/unittests/Format/FormatTestProto.cpp
+++ b/clang/unittests/Format/FormatTestProto.cpp
@@ -16,10 +16,10 @@
namespace clang {
namespace format {
-class FormatTestProto : public ::testing::Test {
+class FormatTestProto : public testing::Test {
protected:
- static std::string format(llvm::StringRef Code, unsigned Offset,
- unsigned Length, const FormatStyle &Style) {
+ static std::string format(StringRef Code, unsigned Offset, unsigned Length,
+ const FormatStyle &Style) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
std::vector<tooling::Range> Ranges(1, tooling::Range(Offset, Length));
@@ -30,13 +30,13 @@ protected:
return *Result;
}
- static std::string format(llvm::StringRef Code) {
+ static std::string format(StringRef Code) {
FormatStyle Style = getGoogleStyle(FormatStyle::LK_Proto);
Style.ColumnLimit = 60; // To make writing tests easier.
return format(Code, 0, Code.size(), Style);
}
- static void verifyFormat(llvm::StringRef Code) {
+ static void verifyFormat(StringRef Code) {
EXPECT_EQ(Code.str(), format(Code)) << "Expected code is not stable";
EXPECT_EQ(Code.str(), format(test::messUp(Code)));
}
diff --git a/clang/unittests/Format/FormatTestRawStrings.cpp b/clang/unittests/Format/FormatTestRawStrings.cpp
index 10f341cc8f79..0615fb1fad4c 100644
--- a/clang/unittests/Format/FormatTestRawStrings.cpp
+++ b/clang/unittests/Format/FormatTestRawStrings.cpp
@@ -21,12 +21,11 @@ namespace clang {
namespace format {
namespace {
-class FormatTestRawStrings : public ::testing::Test {
+class FormatTestRawStrings : public testing::Test {
protected:
enum StatusCheck { SC_ExpectComplete, SC_ExpectIncomplete, SC_DoNotCheck };
- std::string format(llvm::StringRef Code,
- const FormatStyle &Style = getLLVMStyle(),
+ std::string format(StringRef Code, const FormatStyle &Style = getLLVMStyle(),
StatusCheck CheckComplete = SC_ExpectComplete) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
diff --git a/clang/unittests/Format/FormatTestSelective.cpp b/clang/unittests/Format/FormatTestSelective.cpp
index c21c9bfe6079..3ae70a15d359 100644
--- a/clang/unittests/Format/FormatTestSelective.cpp
+++ b/clang/unittests/Format/FormatTestSelective.cpp
@@ -17,9 +17,9 @@ namespace clang {
namespace format {
namespace {
-class FormatTestSelective : public ::testing::Test {
+class FormatTestSelective : public testing::Test {
protected:
- std::string format(llvm::StringRef Code, unsigned Offset, unsigned Length) {
+ std::string format(StringRef Code, unsigned Offset, unsigned Length) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
std::vector<tooling::Range> Ranges(1, tooling::Range(Offset, Length));
diff --git a/clang/unittests/Format/FormatTestTableGen.cpp b/clang/unittests/Format/FormatTestTableGen.cpp
index 79b6961b00b4..7771f6a109a9 100644
--- a/clang/unittests/Format/FormatTestTableGen.cpp
+++ b/clang/unittests/Format/FormatTestTableGen.cpp
@@ -16,10 +16,10 @@
namespace clang {
namespace format {
-class FormatTestTableGen : public ::testing::Test {
+class FormatTestTableGen : public testing::Test {
protected:
- static std::string format(llvm::StringRef Code, unsigned Offset,
- unsigned Length, const FormatStyle &Style) {
+ static std::string format(StringRef Code, unsigned Offset, unsigned Length,
+ const FormatStyle &Style) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
std::vector<tooling::Range> Ranges(1, tooling::Range(Offset, Length));
@@ -30,22 +30,22 @@ protected:
return *Result;
}
- static std::string format(llvm::StringRef Code) {
+ static std::string format(StringRef Code) {
FormatStyle Style = getGoogleStyle(FormatStyle::LK_TableGen);
Style.ColumnLimit = 60; // To make writing tests easier.
return format(Code, 0, Code.size(), Style);
}
- static void verifyFormat(llvm::StringRef Code) {
+ static void verifyFormat(StringRef Code) {
EXPECT_EQ(Code.str(), format(Code)) << "Expected code is not stable";
EXPECT_EQ(Code.str(), format(test::messUp(Code)));
}
- static void verifyFormat(llvm::StringRef Result, llvm::StringRef MessedUp) {
+ static void verifyFormat(StringRef Result, StringRef MessedUp) {
EXPECT_EQ(Result, format(MessedUp));
}
- static void verifyFormat(llvm::StringRef Code, const FormatStyle &Style) {
+ static void verifyFormat(StringRef Code, const FormatStyle &Style) {
EXPECT_EQ(Code.str(), format(Code, 0, Code.size(), Style))
<< "Expected code is not stable";
auto MessUp = test::messUp(Code);
diff --git a/clang/unittests/Format/FormatTestUtils.h b/clang/unittests/Format/FormatTestUtils.h
index fa42b61c547d..cb55b2d747b6 100644
--- a/clang/unittests/Format/FormatTestUtils.h
+++ b/clang/unittests/Format/FormatTestUtils.h
@@ -27,7 +27,7 @@ inline FormatStyle getGoogleStyle() {
// When HandleHash is false, preprocessor directives starting with hash will not
// be on separate lines. This is needed because Verilog uses hash for other
// purposes.
-inline std::string messUp(llvm::StringRef Code, bool HandleHash = true) {
+inline std::string messUp(StringRef Code, bool HandleHash = true) {
std::string MessedUp(Code.str());
bool InComment = false;
bool InPreprocessorDirective = false;
diff --git a/clang/unittests/Format/FormatTestVerilog.cpp b/clang/unittests/Format/FormatTestVerilog.cpp
index abebf9f7d4c7..b5241a4e0d6a 100644
--- a/clang/unittests/Format/FormatTestVerilog.cpp
+++ b/clang/unittests/Format/FormatTestVerilog.cpp
@@ -19,7 +19,7 @@ protected:
FormatStyle getDefaultStyle() const override {
return getLLVMStyle(FormatStyle::LK_Verilog);
}
- std::string messUp(llvm::StringRef Code) const override {
+ std::string messUp(StringRef Code) const override {
return test::messUp(Code, /*HandleHash=*/false);
}
};
diff --git a/clang/unittests/Format/FormatTokenSourceTest.cpp b/clang/unittests/Format/FormatTokenSourceTest.cpp
index 74de93057df6..4f19e255004f 100644
--- a/clang/unittests/Format/FormatTokenSourceTest.cpp
+++ b/clang/unittests/Format/FormatTokenSourceTest.cpp
@@ -15,10 +15,9 @@ namespace clang {
namespace format {
namespace {
-class IndexedTokenSourceTest : public ::testing::Test {
+class IndexedTokenSourceTest : public testing::Test {
protected:
- TokenList lex(llvm::StringRef Code,
- const FormatStyle &Style = getLLVMStyle()) {
+ TokenList lex(StringRef Code, const FormatStyle &Style = getLLVMStyle()) {
return TestLexer(Allocator, Buffers, Style).lex(Code);
}
llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
diff --git a/clang/unittests/Format/MacroCallReconstructorTest.cpp b/clang/unittests/Format/MacroCallReconstructorTest.cpp
index 9df21eae70cb..acef5e79eaae 100644
--- a/clang/unittests/Format/MacroCallReconstructorTest.cpp
+++ b/clang/unittests/Format/MacroCallReconstructorTest.cpp
@@ -30,15 +30,14 @@ public:
// Appends the token stream obtained from expanding the macro Name given
// the provided arguments, to be later retrieved with getTokens().
// Returns the list of tokens making up the unexpanded macro call.
- TokenList
- expand(llvm::StringRef Name,
- const SmallVector<llvm::SmallVector<FormatToken *, 8>, 1> &Args) {
+ TokenList expand(StringRef Name,
+ const SmallVector<SmallVector<FormatToken *, 8>, 1> &Args) {
return expandInternal(Name, Args);
}
- TokenList expand(llvm::StringRef Name) { return expandInternal(Name, {}); }
+ TokenList expand(StringRef Name) { return expandInternal(Name, {}); }
- TokenList expand(llvm::StringRef Name, const std::vector<std::string> &Args) {
+ TokenList expand(StringRef Name, const std::vector<std::string> &Args) {
return expandInternal(Name, lexArgs(Args));
}
@@ -48,8 +47,8 @@ public:
private:
TokenList expandInternal(
- llvm::StringRef Name,
- const std::optional<SmallVector<llvm::SmallVector<FormatToken *, 8>, 1>>
+ StringRef Name,
+ const std::optional<SmallVector<SmallVector<FormatToken *, 8>, 1>>
&Args) {
auto *ID = Lex.id(Name);
auto UnexpandedLine = std::make_unique<UnwrappedLine>();
@@ -75,26 +74,25 @@ private:
return UnexpandedTokens;
}
- llvm::SmallVector<TokenList, 1>
- lexArgs(const std::vector<std::string> &Args) {
- llvm::SmallVector<TokenList, 1> Result;
+ SmallVector<TokenList, 1> lexArgs(const std::vector<std::string> &Args) {
+ SmallVector<TokenList, 1> Result;
for (const auto &Arg : Args)
Result.push_back(uneof(Lex.lex(Arg)));
return Result;
}
llvm::DenseMap<FormatToken *, std::unique_ptr<UnwrappedLine>> Unexpanded;
- llvm::SmallVector<FormatToken *, 8> Tokens;
+ SmallVector<FormatToken *, 8> Tokens;
TestLexer &Lex;
MacroExpander &Macros;
};
struct Chunk {
- Chunk(llvm::ArrayRef<FormatToken *> Tokens)
+ Chunk(ArrayRef<FormatToken *> Tokens)
: Tokens(Tokens.begin(), Tokens.end()) {}
- Chunk(llvm::ArrayRef<UnwrappedLine> Children)
+ Chunk(ArrayRef<UnwrappedLine> Children)
: Children(Children.begin(), Children.end()) {}
- llvm::SmallVector<UnwrappedLineNode, 1> Tokens;
- llvm::SmallVector<UnwrappedLine, 0> Children;
+ SmallVector<UnwrappedLineNode, 1> Tokens;
+ SmallVector<UnwrappedLine, 0> Children;
};
// Allows to produce chunks of a token list by typing the code of equal tokens.
@@ -140,7 +138,7 @@ UnexpandedMap mergeUnexpanded(const UnexpandedMap &M1,
return Result;
}
-class MacroCallReconstructorTest : public ::testing::Test {
+class MacroCallReconstructorTest : public testing::Test {
public:
MacroCallReconstructorTest() : Lex(Allocator, Buffers) {}
@@ -151,7 +149,7 @@ public:
Lex.Allocator, Lex.IdentTable);
}
- UnwrappedLine line(llvm::ArrayRef<FormatToken *> Tokens, unsigned Level = 0) {
+ UnwrappedLine line(ArrayRef<FormatToken *> Tokens, unsigned Level = 0) {
UnwrappedLine Result;
Result.Level = Level;
for (FormatToken *Tok : Tokens)
@@ -159,11 +157,11 @@ public:
return Result;
}
- UnwrappedLine line(llvm::StringRef Text, unsigned Level = 0) {
+ UnwrappedLine line(StringRef Text, unsigned Level = 0) {
return line({lex(Text)}, Level);
}
- UnwrappedLine line(llvm::ArrayRef<Chunk> Chunks, unsigned Level = 0) {
+ UnwrappedLine line(ArrayRef<Chunk> Chunks, unsigned Level = 0) {
UnwrappedLine Result;
Result.Level = Level;
for (const Chunk &Chunk : Chunks) {
@@ -176,13 +174,11 @@ public:
return Result;
}
- TokenList lex(llvm::StringRef Text) { return uneof(Lex.lex(Text)); }
+ TokenList lex(StringRef Text) { return uneof(Lex.lex(Text)); }
- Chunk tokens(llvm::StringRef Text) { return Chunk(lex(Text)); }
+ Chunk tokens(StringRef Text) { return Chunk(lex(Text)); }
- Chunk children(llvm::ArrayRef<UnwrappedLine> Children) {
- return Chunk(Children);
- }
+ Chunk children(ArrayRef<UnwrappedLine> Children) { return Chunk(Children); }
llvm::SpecificBumpPtrAllocator<FormatToken> Allocator;
std::vector<std::unique_ptr<llvm::MemoryBuffer>> Buffers;
diff --git a/clang/unittests/Format/MacroExpanderTest.cpp b/clang/unittests/Format/MacroExpanderTest.cpp
index 72302aa0cea7..e001c986dc2b 100644
--- a/clang/unittests/Format/MacroExpanderTest.cpp
+++ b/clang/unittests/Format/MacroExpanderTest.cpp
@@ -9,7 +9,7 @@ namespace format {
namespace {
-class MacroExpanderTest : public ::testing::Test {
+class MacroExpanderTest : public testing::Test {
public:
MacroExpanderTest() : Lex(Allocator, Buffers) {}
std::unique_ptr<MacroExpander>
@@ -19,33 +19,32 @@ public:
Lex.Allocator, Lex.IdentTable);
}
- std::string expand(MacroExpander &Macros, llvm::StringRef Name) {
+ std::string expand(MacroExpander &Macros, StringRef Name) {
EXPECT_TRUE(Macros.defined(Name))
<< "Macro not defined: \"" << Name << "\"";
return text(Macros.expand(Lex.id(Name), {}));
}
- std::string expand(MacroExpander &Macros, llvm::StringRef Name,
+ std::string expand(MacroExpander &Macros, StringRef Name,
const std::vector<std::string> &Args) {
EXPECT_TRUE(Macros.defined(Name))
<< "Macro not defined: \"" << Name << "\"";
return text(Macros.expand(Lex.id(Name), lexArgs(Args)));
}
- llvm::SmallVector<TokenList, 1>
- lexArgs(const std::vector<std::string> &Args) {
- llvm::SmallVector<TokenList, 1> Result;
+ SmallVector<TokenList, 1> lexArgs(const std::vector<std::string> &Args) {
+ SmallVector<TokenList, 1> Result;
for (const auto &Arg : Args)
Result.push_back(uneof(Lex.lex(Arg)));
return Result;
}
struct MacroAttributes {
- clang::tok::TokenKind Kind;
+ tok::TokenKind Kind;
MacroRole Role;
unsigned Start;
unsigned End;
- llvm::SmallVector<FormatToken *, 1> ExpandedFrom;
+ SmallVector<FormatToken *, 1> ExpandedFrom;
};
void expectAttributes(const TokenList &Tokens,
@@ -56,8 +55,8 @@ public:
if (I >= Attributes.size())
continue;
std::string Context =
- ("for token " + llvm::Twine(I) + ": " + Tokens[I]->Tok.getName() +
- " / " + Tokens[I]->TokenText)
+ ("for token " + Twine(I) + ": " + Tokens[I]->Tok.getName() + " / " +
+ Tokens[I]->TokenText)
.str();
EXPECT_TRUE(Tokens[I]->is(Attributes[I].Kind))
<< Context << " in " << text(Tokens) << " at " << File << ":" << Line;
diff --git a/clang/unittests/Format/MatchFilePathTest.cpp b/clang/unittests/Format/MatchFilePathTest.cpp
index 55723584ddc8..f41cf7f97159 100644
--- a/clang/unittests/Format/MatchFilePathTest.cpp
+++ b/clang/unittests/Format/MatchFilePathTest.cpp
@@ -13,7 +13,7 @@ namespace clang {
namespace format {
namespace {
-class MatchFilePathTest : public ::testing::Test {
+class MatchFilePathTest : public testing::Test {
protected:
bool match(llvm::StringRef FilePath, llvm::StringRef Pattern) {
return matchFilePath(Pattern, FilePath);
diff --git a/clang/unittests/Format/NamespaceEndCommentsFixerTest.cpp b/clang/unittests/Format/NamespaceEndCommentsFixerTest.cpp
index fe097e9961e2..2c45ad1cbe1c 100644
--- a/clang/unittests/Format/NamespaceEndCommentsFixerTest.cpp
+++ b/clang/unittests/Format/NamespaceEndCommentsFixerTest.cpp
@@ -17,16 +17,16 @@ namespace clang {
namespace format {
namespace {
-class NamespaceEndCommentsFixerTest : public ::testing::Test {
+class NamespaceEndCommentsFixerTest : public testing::Test {
protected:
std::string
- fixNamespaceEndComments(llvm::StringRef Code,
+ fixNamespaceEndComments(StringRef Code,
const std::vector<tooling::Range> &Ranges,
const FormatStyle &Style = getLLVMStyle()) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
tooling::Replacements Replaces =
- clang::format::fixNamespaceEndComments(Style, Code, Ranges, "<stdin>");
+ format::fixNamespaceEndComments(Style, Code, Ranges, "<stdin>");
auto Result = applyAllReplacements(Code, Replaces);
EXPECT_TRUE(static_cast<bool>(Result));
LLVM_DEBUG(llvm::errs() << "\n" << *Result << "\n\n");
@@ -34,7 +34,7 @@ protected:
}
std::string
- fixNamespaceEndComments(llvm::StringRef Code,
+ fixNamespaceEndComments(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return fixNamespaceEndComments(
Code,
@@ -43,8 +43,7 @@ protected:
bool isFormatted(StringRef Code, const std::vector<tooling::Range> &Ranges,
const FormatStyle &Style = getLLVMStyle()) const {
- return clang::format::fixNamespaceEndComments(Style, Code, Ranges,
- "<stdin>")
+ return format::fixNamespaceEndComments(Style, Code, Ranges, "<stdin>")
.empty();
}
diff --git a/clang/unittests/Format/ObjCPropertyAttributeOrderFixerTest.cpp b/clang/unittests/Format/ObjCPropertyAttributeOrderFixerTest.cpp
index 79ded6673591..9f852e4768b1 100644
--- a/clang/unittests/Format/ObjCPropertyAttributeOrderFixerTest.cpp
+++ b/clang/unittests/Format/ObjCPropertyAttributeOrderFixerTest.cpp
@@ -28,7 +28,7 @@ namespace {
class ObjCPropertyAttributeOrderFixerTest : public FormatTestBase {
protected:
- TokenList annotate(llvm::StringRef Code,
+ TokenList annotate(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return TestLexer(Allocator, Buffers, Style).annotate(Code);
}
diff --git a/clang/unittests/Format/QualifierFixerTest.cpp b/clang/unittests/Format/QualifierFixerTest.cpp
index 1f21fc0e0b42..3a5f63e5de65 100644
--- a/clang/unittests/Format/QualifierFixerTest.cpp
+++ b/clang/unittests/Format/QualifierFixerTest.cpp
@@ -28,7 +28,7 @@ namespace {
class QualifierFixerTest : public FormatTestBase {
protected:
- TokenList annotate(llvm::StringRef Code,
+ TokenList annotate(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return TestLexer(Allocator, Buffers, Style).annotate(Code);
}
diff --git a/clang/unittests/Format/SortImportsTestJS.cpp b/clang/unittests/Format/SortImportsTestJS.cpp
index f423cdd21d1a..59ce62117d4a 100644
--- a/clang/unittests/Format/SortImportsTestJS.cpp
+++ b/clang/unittests/Format/SortImportsTestJS.cpp
@@ -17,7 +17,7 @@ namespace clang {
namespace format {
namespace {
-class SortImportsTestJS : public ::testing::Test {
+class SortImportsTestJS : public testing::Test {
protected:
std::string sort(StringRef Code, unsigned Offset = 0, unsigned Length = 0) {
StringRef FileName = "input.js";
@@ -33,10 +33,9 @@ protected:
return *Formatted;
}
- void _verifySort(const char *File, int Line, llvm::StringRef Expected,
- llvm::StringRef Code, unsigned Offset = 0,
- unsigned Length = 0) {
- ::testing::ScopedTrace t(File, Line, ::testing::Message() << Code.str());
+ void _verifySort(const char *File, int Line, StringRef Expected,
+ StringRef Code, unsigned Offset = 0, unsigned Length = 0) {
+ testing::ScopedTrace t(File, Line, testing::Message() << Code.str());
std::string Result = sort(Code, Offset, Length);
EXPECT_EQ(Expected.str(), Result) << "Expected:\n"
<< Expected << "\nActual:\n"
diff --git a/clang/unittests/Format/SortImportsTestJava.cpp b/clang/unittests/Format/SortImportsTestJava.cpp
index 98a6826b1ff5..d577efa34f86 100644
--- a/clang/unittests/Format/SortImportsTestJava.cpp
+++ b/clang/unittests/Format/SortImportsTestJava.cpp
@@ -7,7 +7,7 @@ namespace clang {
namespace format {
namespace {
-class SortImportsTestJava : public ::testing::Test {
+class SortImportsTestJava : public testing::Test {
protected:
std::vector<tooling::Range> GetCodeRange(StringRef Code) {
return std::vector<tooling::Range>(1, tooling::Range(0, Code.size()));
diff --git a/clang/unittests/Format/SortIncludesTest.cpp b/clang/unittests/Format/SortIncludesTest.cpp
index 52ba19627182..2eeb16b4ab9f 100644
--- a/clang/unittests/Format/SortIncludesTest.cpp
+++ b/clang/unittests/Format/SortIncludesTest.cpp
@@ -43,7 +43,7 @@ protected:
return sort(Code, GetCodeRange(Code), FileName, ExpectedNumRanges);
}
- unsigned newCursor(llvm::StringRef Code, unsigned Cursor) {
+ unsigned newCursor(StringRef Code, unsigned Cursor) {
sortIncludes(FmtStyle, Code, GetCodeRange(Code), "input.cpp", &Cursor);
return Cursor;
}
@@ -644,7 +644,7 @@ TEST_F(SortIncludesTest, SupportOptionalCaseSensitiveSorting) {
"#include \"A/b.h\"",
"a.h"));
- Style.IncludeBlocks = clang::tooling::IncludeStyle::IBS_Regroup;
+ Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
Style.IncludeCategories = {
{"^\"", 1, 0, false}, {"^<.*\\.h>$", 2, 0, false}, {"^<", 3, 0, false}};
@@ -694,7 +694,7 @@ TEST_F(SortIncludesTest, SupportCaseInsensitiveMatching) {
}
TEST_F(SortIncludesTest, SupportOptionalCaseSensitiveMachting) {
- Style.IncludeBlocks = clang::tooling::IncludeStyle::IBS_Regroup;
+ Style.IncludeBlocks = tooling::IncludeStyle::IBS_Regroup;
Style.IncludeCategories = {{"^\"", 1, 0, false},
{"^<.*\\.h>$", 2, 0, false},
{"^<Q[A-Z][^\\.]*>", 3, 0, false},
diff --git a/clang/unittests/Format/TestLexer.h b/clang/unittests/Format/TestLexer.h
index 8b5949b32fc9..294d0106dbe2 100644
--- a/clang/unittests/Format/TestLexer.h
+++ b/clang/unittests/Format/TestLexer.h
@@ -28,7 +28,7 @@
namespace clang {
namespace format {
-typedef llvm::SmallVector<FormatToken *, 8> TokenList;
+typedef SmallVector<FormatToken *, 8> TokenList;
inline std::ostream &operator<<(std::ostream &Stream, const FormatToken &Tok) {
Stream << "(" << Tok.Tok.getName() << ", \"" << Tok.TokenText.str() << "\" , "
@@ -48,7 +48,7 @@ inline TokenList uneof(const TokenList &Tokens) {
return TokenList(Tokens.begin(), std::prev(Tokens.end()));
}
-inline std::string text(llvm::ArrayRef<FormatToken *> Tokens) {
+inline std::string text(ArrayRef<FormatToken *> Tokens) {
return std::accumulate(Tokens.begin(), Tokens.end(), std::string(),
[](const std::string &R, FormatToken *Tok) {
return (R + Tok->TokenText).str();
@@ -63,13 +63,13 @@ public:
: Allocator(Allocator), Buffers(Buffers), Style(Style),
SourceMgr("test.cpp", ""), IdentTable(getFormattingLangOpts(Style)) {}
- TokenList lex(llvm::StringRef Code) {
+ TokenList lex(StringRef Code) {
FormatTokenLexer Lex = getNewLexer(Code);
ArrayRef<FormatToken *> Result = Lex.lex();
return TokenList(Result.begin(), Result.end());
}
- TokenList annotate(llvm::StringRef Code) {
+ TokenList annotate(StringRef Code) {
FormatTokenLexer Lex = getNewLexer(Code);
auto Tokens = Lex.lex();
UnwrappedLineParser Parser(SourceMgr.get(), Style, Lex.getKeywords(), 0,
@@ -85,7 +85,7 @@ public:
return TokenList(Tokens.begin(), Tokens.end());
}
- FormatToken *id(llvm::StringRef Code) {
+ FormatToken *id(StringRef Code) {
auto Result = uneof(lex(Code));
assert(Result.size() == 1U && "Code must expand to 1 token.");
return Result[0];
@@ -100,7 +100,7 @@ protected:
FormatTokenLexer getNewLexer(StringRef Code) {
Buffers.push_back(
llvm::MemoryBuffer::getMemBufferCopy(Code, "<scratch space>"));
- clang::FileID FID =
+ FileID FID =
SourceMgr.get().createFileID(Buffers.back()->getMemBufferRef());
return FormatTokenLexer(SourceMgr.get(), FID, 0, Style, Encoding, Allocator,
IdentTable);
@@ -111,7 +111,7 @@ public:
std::vector<std::unique_ptr<llvm::MemoryBuffer>> &Buffers;
FormatStyle Style;
encoding::Encoding Encoding = encoding::Encoding_UTF8;
- clang::SourceManagerForFile SourceMgr;
+ SourceManagerForFile SourceMgr;
IdentifierTable IdentTable;
SmallVector<UnwrappedLine, 16> UnwrappedLines;
};
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index aadfa6dc0165..6ea9c4a241dc 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -24,9 +24,9 @@ static bool operator==(const FormatToken &LHS,
namespace {
-class TokenAnnotatorTest : public ::testing::Test {
+class TokenAnnotatorTest : public testing::Test {
protected:
- TokenList annotate(llvm::StringRef Code,
+ TokenList annotate(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return TestLexer(Allocator, Buffers, Style).annotate(Code);
}
@@ -2097,7 +2097,7 @@ TEST_F(TokenAnnotatorTest, UnderstandsAttributeMacrosOnObjCProperty) {
}
TEST_F(TokenAnnotatorTest, UnderstandsVerilogOperators) {
- auto Annotate = [this](llvm::StringRef Code) {
+ auto Annotate = [this](StringRef Code) {
return annotate(Code, getLLVMStyle(FormatStyle::LK_Verilog));
};
// Test that unary operators get labeled as such and that operators like '++'
@@ -2279,9 +2279,7 @@ TEST_F(TokenAnnotatorTest, UnderstandTableGenTokens) {
TestLexer Lexer(Allocator, Buffers, Style);
AdditionalKeywords Keywords(Lexer.IdentTable);
- auto Annotate = [&Lexer](llvm::StringRef Code) {
- return Lexer.annotate(Code);
- };
+ auto Annotate = [&Lexer](StringRef Code) { return Lexer.annotate(Code); };
// Additional keywords representation test.
auto Tokens = Annotate("def foo : Bar<1>;");
@@ -2357,7 +2355,7 @@ TEST_F(TokenAnnotatorTest, UnderstandTableGenTokens) {
Tokens = Annotate("!cond");
EXPECT_TOKEN(Tokens[0], tok::identifier, TT_TableGenCondOperator);
- auto AnnotateValue = [this, &Style](llvm::StringRef Code) {
+ auto AnnotateValue = [this, &Style](StringRef Code) {
// Values are annotated only in specific context.
auto Result = annotate(("def X { let V = " + Code + "; }").str(), Style);
return decltype(Result){Result.begin() + 6, Result.end() - 3};
@@ -2581,15 +2579,28 @@ TEST_F(TokenAnnotatorTest, UnderstandsLabels) {
auto Tokens = annotate("{ x: break; }");
ASSERT_EQ(Tokens.size(), 7u) << Tokens;
EXPECT_TOKEN(Tokens[2], tok::colon, TT_GotoLabelColon);
+
Tokens = annotate("{ case x: break; }");
ASSERT_EQ(Tokens.size(), 8u) << Tokens;
EXPECT_TOKEN(Tokens[3], tok::colon, TT_CaseLabelColon);
+
Tokens = annotate("{ x: { break; } }");
ASSERT_EQ(Tokens.size(), 9u) << Tokens;
EXPECT_TOKEN(Tokens[2], tok::colon, TT_GotoLabelColon);
+
Tokens = annotate("{ case x: { break; } }");
ASSERT_EQ(Tokens.size(), 10u) << Tokens;
EXPECT_TOKEN(Tokens[3], tok::colon, TT_CaseLabelColon);
+
+ Tokens = annotate("#define FOO label:");
+ ASSERT_EQ(Tokens.size(), 6u) << Tokens;
+ EXPECT_TOKEN(Tokens[4], tok::colon, TT_GotoLabelColon);
+
+ Tokens = annotate("#define FOO \\\n"
+ "label: \\\n"
+ " break;");
+ ASSERT_EQ(Tokens.size(), 8u) << Tokens;
+ EXPECT_TOKEN(Tokens[4], tok::colon, TT_GotoLabelColon);
}
TEST_F(TokenAnnotatorTest, UnderstandsNestedBlocks) {
@@ -2649,7 +2660,7 @@ TEST_F(TokenAnnotatorTest, UnderstandDesignatedInitializers) {
}
TEST_F(TokenAnnotatorTest, UnderstandsJavaScript) {
- auto Annotate = [this](llvm::StringRef Code) {
+ auto Annotate = [this](StringRef Code) {
return annotate(Code, getLLVMStyle(FormatStyle::LK_JavaScript));
};
@@ -3004,6 +3015,60 @@ TEST_F(TokenAnnotatorTest, SwitchExpression) {
EXPECT_TOKEN(Tokens[20], tok::arrow, TT_CaseLabelArrow);
}
+TEST_F(TokenAnnotatorTest, CppAltOperatorKeywords) {
+ auto Tokens = annotate("a = b and c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::ampamp, TT_BinaryOperator);
+
+ Tokens = annotate("a = b and_eq c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::ampequal, TT_BinaryOperator);
+
+ Tokens = annotate("a = b bitand c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::amp, TT_BinaryOperator);
+
+ Tokens = annotate("a = b bitor c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::pipe, TT_BinaryOperator);
+
+ Tokens = annotate("a = b compl c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::tilde, TT_UnaryOperator);
+
+ Tokens = annotate("a = b not c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::exclaim, TT_UnaryOperator);
+
+ Tokens = annotate("a = b not_eq c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::exclaimequal, TT_BinaryOperator);
+
+ Tokens = annotate("a = b or c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::pipepipe, TT_BinaryOperator);
+
+ Tokens = annotate("a = b or_eq c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::pipeequal, TT_BinaryOperator);
+
+ Tokens = annotate("a = b xor c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::caret, TT_BinaryOperator);
+
+ Tokens = annotate("a = b xor_eq c;");
+ ASSERT_EQ(Tokens.size(), 7u);
+ EXPECT_TOKEN(Tokens[3], tok::caretequal, TT_BinaryOperator);
+
+ Tokens = annotate("xor = foo;");
+ ASSERT_EQ(Tokens.size(), 5u);
+ EXPECT_TOKEN(Tokens[0], tok::identifier, TT_Unknown);
+
+ Tokens = annotate("int xor = foo;");
+ ASSERT_EQ(Tokens.size(), 6u);
+ EXPECT_TOKEN(Tokens[1], tok::identifier, TT_StartOfName);
+}
+
} // namespace
} // namespace format
} // namespace clang
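
For reference, the CppAltOperatorKeywords test added above relies on the fact that C++'s alternative operator spellings lex to the same tokens as their symbolic forms, which is what the annotator is classifying. A minimal standalone sketch of that correspondence (illustration only, not taken from the patch):

// Illustration only: each alternative keyword is equivalent to its symbolic
// operator, matching the token kinds checked in CppAltOperatorKeywords.
#include <cassert>

int main() {
  bool a = true, b = false;
  assert((a and b) == (a && b));     // 'and'    -> tok::ampamp
  assert((a or b) == (a || b));      // 'or'     -> tok::pipepipe
  assert((not a) == (!a));           // 'not'    -> tok::exclaim
  assert((a not_eq b) == (a != b));  // 'not_eq' -> tok::exclaimequal

  unsigned x = 12u, y = 10u;
  assert((x bitand y) == (x & y));   // 'bitand' -> tok::amp
  assert((x bitor y) == (x | y));    // 'bitor'  -> tok::pipe
  assert((x xor y) == (x ^ y));      // 'xor'    -> tok::caret
  assert((compl x) == ~x);           // 'compl'  -> tok::tilde

  x and_eq y;                        // 'and_eq' -> tok::ampequal
  x or_eq y;                         // 'or_eq'  -> tok::pipeequal
  x xor_eq y;                        // 'xor_eq' -> tok::caretequal
  return 0;
}
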
diff --git a/clang/unittests/Format/UsingDeclarationsSorterTest.cpp b/clang/unittests/Format/UsingDeclarationsSorterTest.cpp
index c0c0de7076fe..ddcecc0208b0 100644
--- a/clang/unittests/Format/UsingDeclarationsSorterTest.cpp
+++ b/clang/unittests/Format/UsingDeclarationsSorterTest.cpp
@@ -17,22 +17,22 @@ namespace clang {
namespace format {
namespace {
-class UsingDeclarationsSorterTest : public ::testing::Test {
+class UsingDeclarationsSorterTest : public testing::Test {
protected:
- std::string sortUsingDeclarations(llvm::StringRef Code,
+ std::string sortUsingDeclarations(StringRef Code,
const std::vector<tooling::Range> &Ranges,
const FormatStyle &Style = getLLVMStyle()) {
LLVM_DEBUG(llvm::errs() << "---\n");
LLVM_DEBUG(llvm::errs() << Code << "\n\n");
tooling::Replacements Replaces =
- clang::format::sortUsingDeclarations(Style, Code, Ranges, "<stdin>");
+ format::sortUsingDeclarations(Style, Code, Ranges, "<stdin>");
auto Result = applyAllReplacements(Code, Replaces);
EXPECT_TRUE(static_cast<bool>(Result));
LLVM_DEBUG(llvm::errs() << "\n" << *Result << "\n\n");
return *Result;
}
- std::string sortUsingDeclarations(llvm::StringRef Code,
+ std::string sortUsingDeclarations(StringRef Code,
const FormatStyle &Style = getLLVMStyle()) {
return sortUsingDeclarations(Code,
/*Ranges=*/{1, tooling::Range(0, Code.size())},
diff --git a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
index 4512acfd19a1..f564689fff7c 100644
--- a/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
+++ b/clang/utils/TableGen/ClangDiagnosticsEmitter.cpp
@@ -1086,7 +1086,7 @@ Piece *DiagnosticTextBuilder::DiagText::parseDiagText(StringRef &Text,
PluralPiece *Plural = New<PluralPiece>();
do {
Text = Text.drop_front(); // '{' or '|'
- size_t End = Text.find_first_of(":");
+ size_t End = Text.find_first_of(':');
if (End == StringRef::npos)
Builder.PrintFatalError("expected ':' while parsing %plural");
++End;
diff --git a/clang/www/cxx_dr_status.html b/clang/www/cxx_dr_status.html
index 5d517d358672..9d458330f537 100755
--- a/clang/www/cxx_dr_status.html
+++ b/clang/www/cxx_dr_status.html
@@ -12890,11 +12890,11 @@ and <I>POD class</I></td>
<td>Virtual bases in destructors and defaulted assignment operators</td>
<td class="full" align="center">Yes</td>
</tr>
- <tr class="open" id="2181">
+ <tr id="2181">
<td><a href="https://cplusplus.github.io/CWG/issues/2181.html">2181</a></td>
- <td>drafting</td>
+ <td>C++20</td>
<td>Normative requirements in an informative Annex</td>
- <td align="center">Not resolved</td>
+ <td class="unknown" align="center">Unknown</td>
</tr>
<tr class="open" id="2182">
<td><a href="https://cplusplus.github.io/CWG/issues/2182.html">2182</a></td>
@@ -17021,13 +17021,13 @@ objects</td>
</tr>
<tr class="open" id="2869">
<td><a href="https://cplusplus.github.io/CWG/issues/2869.html">2869</a></td>
- <td>review</td>
+ <td>tentatively ready</td>
<td><TT>this</TT> in local classes</td>
<td align="center">Not resolved</td>
</tr>
<tr class="open" id="2870">
<td><a href="https://cplusplus.github.io/CWG/issues/2870.html">2870</a></td>
- <td>review</td>
+ <td>tentatively ready</td>
<td>Combining absent <I>encoding-prefix</I>es</td>
<td align="center">Not resolved</td>
</tr>
@@ -17039,7 +17039,7 @@ objects</td>
</tr>
<tr class="open" id="2872">
<td><a href="https://cplusplus.github.io/CWG/issues/2872.html">2872</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Linkage and unclear "can be referred to"</td>
<td align="center">Not resolved</td>
</tr>
@@ -17051,25 +17051,25 @@ objects</td>
</tr>
<tr class="open" id="2874">
<td><a href="https://cplusplus.github.io/CWG/issues/2874.html">2874</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Qualified declarations of partial specializations</td>
<td align="center">Not resolved</td>
</tr>
<tr class="open" id="2875">
<td><a href="https://cplusplus.github.io/CWG/issues/2875.html">2875</a></td>
- <td>open</td>
- <td>Missing support for round-tripping nullptr through indirection/address operators</td>
+ <td>tentatively ready</td>
+ <td>Missing support for round-tripping null pointer values through indirection/address operators</td>
<td align="center">Not resolved</td>
</tr>
<tr class="open" id="2876">
<td><a href="https://cplusplus.github.io/CWG/issues/2876.html">2876</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Disambiguation of <TT>T x = delete("text")</TT></td>
<td align="center">Not resolved</td>
</tr>
<tr class="open" id="2877">
<td><a href="https://cplusplus.github.io/CWG/issues/2877.html">2877</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Type-only lookup for <I>using-enum-declarator</I></td>
<td align="center">Not resolved</td>
</tr>
@@ -17093,7 +17093,7 @@ objects</td>
</tr>
<tr class="open" id="2881">
<td><a href="https://cplusplus.github.io/CWG/issues/2881.html">2881</a></td>
- <td>open</td>
+ <td>tentatively ready</td>
<td>Type restrictions for the explicit object parameter of a lambda</td>
<td align="center">Not resolved</td>
</tr>
@@ -17109,15 +17109,15 @@ objects</td>
<td>Definition of "odr-usable" ignores lambda scopes</td>
<td align="center">Not resolved</td>
</tr>
- <tr class="open" id="2884">
+ <tr id="2884">
<td><a href="https://cplusplus.github.io/CWG/issues/2884.html">2884</a></td>
- <td>open</td>
+ <td>dup</td>
<td>Qualified declarations of partial specializations</td>
- <td align="center">Not resolved</td>
+ <td class="unknown" align="center">Unknown</td>
</tr>
<tr class="open" id="2885">
<td><a href="https://cplusplus.github.io/CWG/issues/2885.html">2885</a></td>
- <td>open</td>
+ <td>review</td>
<td>Non-eligible trivial default constructors</td>
<td align="center">Not resolved</td>
</tr>
@@ -17150,6 +17150,12 @@ objects</td>
<td>open</td>
<td>Defining members of local classes</td>
<td align="center">Not resolved</td>
+ </tr>
+ <tr class="open" id="2891">
+ <td><a href="https://cplusplus.github.io/CWG/issues/2891.html">2891</a></td>
+ <td>review</td>
+ <td>Normative status of implementation limits</td>
+ <td align="center">Not resolved</td>
</tr></table>
</div>
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
index 63475f434cd1..682df8c6e034 100644
--- a/compiler-rt/lib/dfsan/dfsan_allocator.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -45,7 +45,7 @@ const uptr kAllocatorSpace = 0xE00000000000ULL;
#else
const uptr kAllocatorSpace = 0x700000000000ULL;
#endif
-const uptr kMaxAllowedMallocSize = 8UL << 30;
+const uptr kMaxAllowedMallocSize = 1ULL << 40;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
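
For reference, this change (repeated below for the lsan and msan allocators) raises the per-allocation limit from 8UL << 30 (8 GiB) to 1ULL << 40 (1 TiB). A quick standalone check of the arithmetic, illustration only and not part of the patch:

// Sanity check of the two constants (illustration only, not patch code).
static_assert((8ULL << 30) == 8ULL * 1024 * 1024 * 1024, "old limit: 8 GiB");
static_assert((1ULL << 40) == 1024ULL * 1024 * 1024 * 1024, "new limit: 1 TiB");
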
diff --git a/compiler-rt/lib/dfsan/dfsan_custom.cpp b/compiler-rt/lib/dfsan/dfsan_custom.cpp
index 3af26e9f64c9..af3c1f4d1673 100644
--- a/compiler-rt/lib/dfsan/dfsan_custom.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_custom.cpp
@@ -1901,17 +1901,27 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfso_nanosleep(
return __dfsw_nanosleep(req, rem, req_label, rem_label, ret_label);
}
-static void clear_msghdr_labels(size_t bytes_written, struct msghdr *msg) {
+static void clear_msghdr_labels(size_t bytes_written, struct msghdr *msg,
+ int flags) {
dfsan_set_label(0, msg, sizeof(*msg));
dfsan_set_label(0, msg->msg_name, msg->msg_namelen);
dfsan_set_label(0, msg->msg_control, msg->msg_controllen);
- for (size_t i = 0; bytes_written > 0; ++i) {
- assert(i < msg->msg_iovlen);
+ for (size_t i = 0; i < msg->msg_iovlen; ++i) {
struct iovec *iov = &msg->msg_iov[i];
- size_t iov_written =
- bytes_written < iov->iov_len ? bytes_written : iov->iov_len;
+ size_t iov_written = iov->iov_len;
+
+ // When MSG_TRUNC is not set, we want to avoid setting 0 label on bytes that
+ // may not have changed, using bytes_written to bound the 0 label write.
+ // When MSG_TRUNC flag is set, bytes_written may be larger than the buffer,
+ // and should not be used as a bound.
+ if (!(MSG_TRUNC & flags)) {
+ if (bytes_written < iov->iov_len) {
+ iov_written = bytes_written;
+ }
+ bytes_written -= iov_written;
+ }
+
dfsan_set_label(0, iov->iov_base, iov_written);
- bytes_written -= iov_written;
}
}
@@ -1923,7 +1933,7 @@ SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_recvmmsg(
int ret = recvmmsg(sockfd, msgvec, vlen, flags, timeout);
for (int i = 0; i < ret; ++i) {
dfsan_set_label(0, &msgvec[i].msg_len, sizeof(msgvec[i].msg_len));
- clear_msghdr_labels(msgvec[i].msg_len, &msgvec[i].msg_hdr);
+ clear_msghdr_labels(msgvec[i].msg_len, &msgvec[i].msg_hdr, flags);
}
*ret_label = 0;
return ret;
@@ -1947,7 +1957,7 @@ SANITIZER_INTERFACE_ATTRIBUTE ssize_t __dfsw_recvmsg(
dfsan_label msg_label, dfsan_label flags_label, dfsan_label *ret_label) {
ssize_t ret = recvmsg(sockfd, msg, flags);
if (ret >= 0)
- clear_msghdr_labels(ret, msg);
+ clear_msghdr_labels(ret, msg, flags);
*ret_label = 0;
return ret;
}
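
For context on the MSG_TRUNC handling above: on Linux datagram sockets, recvmsg called with MSG_TRUNC returns the real length of the datagram even when it exceeds the supplied buffers, so bytes_written cannot be used to bound the label clearing in that case. A minimal standalone sketch of that behavior (not patch code; the helper name is made up):

// Illustration only: with MSG_PEEK | MSG_TRUNC on a datagram socket,
// recvmsg() reports the datagram's true size, while only sizeof(buf)
// bytes are actually copied into the buffer.
#include <sys/socket.h>
#include <sys/uio.h>

ssize_t peekFullLength(int fd) {
  char buf[2]; // deliberately smaller than the expected datagram
  struct iovec iov = {buf, sizeof(buf)};
  struct msghdr msg = {};
  msg.msg_iov = &iov;
  msg.msg_iovlen = 1;
  return recvmsg(fd, &msg, MSG_PEEK | MSG_TRUNC);
}
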
diff --git a/compiler-rt/lib/lsan/lsan_allocator.cpp b/compiler-rt/lib/lsan/lsan_allocator.cpp
index 12d579a9385b..493bf5f9efc5 100644
--- a/compiler-rt/lib/lsan/lsan_allocator.cpp
+++ b/compiler-rt/lib/lsan/lsan_allocator.cpp
@@ -31,7 +31,7 @@ static const uptr kMaxAllowedMallocSize = 1ULL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4ULL << 30;
#else
-static const uptr kMaxAllowedMallocSize = 8ULL << 30;
+static const uptr kMaxAllowedMallocSize = 1ULL << 40;
#endif
static Allocator allocator;
diff --git a/compiler-rt/lib/msan/msan_allocator.cpp b/compiler-rt/lib/msan/msan_allocator.cpp
index b1bc5b9390f7..8350106dc817 100644
--- a/compiler-rt/lib/msan/msan_allocator.cpp
+++ b/compiler-rt/lib/msan/msan_allocator.cpp
@@ -71,7 +71,7 @@ static const uptr kAllocatorSpace = 0x700000000000ULL;
#else
static const uptr kAllocatorSpace = 0x600000000000ULL;
#endif
-static const uptr kMaxAllowedMallocSize = 8UL << 30;
+static const uptr kMaxAllowedMallocSize = 1ULL << 40;
struct AP64 { // Allocator64 parameters. Deliberately using a short name.
static const uptr kSpaceBeg = kAllocatorSpace;
diff --git a/compiler-rt/test/dfsan/custom.cpp b/compiler-rt/test/dfsan/custom.cpp
index f544e481b726..cede0d64dbcf 100644
--- a/compiler-rt/test/dfsan/custom.cpp
+++ b/compiler-rt/test/dfsan/custom.cpp
@@ -768,26 +768,53 @@ void test_recvmsg() {
ssize_t sent = sendmsg(sockfds[0], &smsg, 0);
assert(sent > 0);
- char rbuf[128];
- struct iovec riovs[2] = {{&rbuf[0], 4}, {&rbuf[4], 4}};
- struct msghdr rmsg = {};
- rmsg.msg_iov = riovs;
- rmsg.msg_iovlen = 2;
-
- dfsan_set_label(i_label, rbuf, sizeof(rbuf));
- dfsan_set_label(i_label, &rmsg, sizeof(rmsg));
-
- DEFINE_AND_SAVE_ORIGINS(rmsg)
-
- ssize_t received = recvmsg(sockfds[1], &rmsg, 0);
- assert(received == sent);
- assert(memcmp(sbuf, rbuf, 8) == 0);
- ASSERT_ZERO_LABEL(received);
- ASSERT_READ_ZERO_LABEL(&rmsg, sizeof(rmsg));
- ASSERT_READ_ZERO_LABEL(&rbuf[0], 8);
- ASSERT_READ_LABEL(&rbuf[8], 1, i_label);
-
- ASSERT_SAVED_ORIGINS(rmsg)
+ {
+ char rpbuf[2];
+ struct iovec peek_iov;
+ peek_iov.iov_base = rpbuf;
+ peek_iov.iov_len = 2;
+
+ struct msghdr peek_header = {};
+ peek_header.msg_iov = &peek_iov;
+ peek_header.msg_iovlen = 1;
+
+ dfsan_set_label(i_label, rpbuf, sizeof(rpbuf));
+ dfsan_set_label(i_label, &peek_header, sizeof(peek_header));
+
+ DEFINE_AND_SAVE_ORIGINS(peek_header)
+
+ ssize_t received = recvmsg(sockfds[1], &peek_header, MSG_PEEK | MSG_TRUNC);
+ assert(received == sent);
+ assert(memcmp(sbuf, rpbuf, 2) == 0);
+ ASSERT_ZERO_LABEL(received);
+ ASSERT_READ_ZERO_LABEL(&peek_header, sizeof(peek_header));
+ ASSERT_READ_ZERO_LABEL(&rpbuf[0], 0);
+
+ ASSERT_SAVED_ORIGINS(peek_header)
+ }
+
+ {
+ char rbuf[128];
+ struct iovec riovs[2] = {{&rbuf[0], 4}, {&rbuf[4], 4}};
+ struct msghdr rmsg = {};
+ rmsg.msg_iov = riovs;
+ rmsg.msg_iovlen = 2;
+
+ dfsan_set_label(i_label, rbuf, sizeof(rbuf));
+ dfsan_set_label(i_label, &rmsg, sizeof(rmsg));
+
+ DEFINE_AND_SAVE_ORIGINS(rmsg)
+
+ ssize_t received = recvmsg(sockfds[1], &rmsg, 0);
+ assert(received == sent);
+ assert(memcmp(sbuf, rbuf, 8) == 0);
+ ASSERT_ZERO_LABEL(received);
+ ASSERT_READ_ZERO_LABEL(&rmsg, sizeof(rmsg));
+ ASSERT_READ_ZERO_LABEL(&rbuf[0], 8);
+ ASSERT_READ_LABEL(&rbuf[8], 1, i_label);
+
+ ASSERT_SAVED_ORIGINS(rmsg)
+ }
close(sockfds[0]);
close(sockfds[1]);
diff --git a/flang/include/flang/Lower/AbstractConverter.h b/flang/include/flang/Lower/AbstractConverter.h
index f43dfd8343ec..0bc68de6938d 100644
--- a/flang/include/flang/Lower/AbstractConverter.h
+++ b/flang/include/flang/Lower/AbstractConverter.h
@@ -131,8 +131,7 @@ public:
/// For a given symbol, check if it is present in the inner-most
/// level of the symbol map.
- virtual bool
- isPresentShallowLookup(const Fortran::semantics::Symbol &sym) = 0;
+ virtual bool isPresentShallowLookup(Fortran::semantics::Symbol &sym) = 0;
/// Collect the set of symbols with \p flag in \p eval
/// region if \p collectSymbols is true. Otherwise, collect the
diff --git a/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td b/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td
index 72157bce4f76..37b8da018195 100644
--- a/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td
+++ b/flang/include/flang/Optimizer/Dialect/CUF/CUFOps.td
@@ -152,15 +152,21 @@ def cuf_DataTransferOp : cuf_Op<"data_transfer", []> {
a = adev ! transfer device to host
bdev = adev ! transfer device to device
```
+
+    When the data transfer is done on data held by descriptors, the LHS data
+    held by the descriptor are updated. When required, the LHS descriptor is
+    also updated.
}];
- let arguments = (ins Arg<AnyReferenceLike, "", [MemWrite]>:$src,
- Arg<AnyReferenceLike, "", [MemRead]>:$dst,
+ let arguments = (ins Arg<AnyType, "", [MemRead]>:$src,
+ Arg<AnyRefOrBoxType, "", [MemWrite]>:$dst,
cuf_DataTransferKindAttr:$transfer_kind);
let assemblyFormat = [{
$src `to` $dst attr-dict `:` type(operands)
}];
+
+ let hasVerifier = 1;
}
def cuf_KernelLaunchOp : cuf_Op<"kernel_launch", [CallOpInterface,
diff --git a/flang/include/flang/Optimizer/HLFIR/Passes.h b/flang/include/flang/Optimizer/HLFIR/Passes.h
index ef47c94b67a8..f0736c782b6c 100644
--- a/flang/include/flang/Optimizer/HLFIR/Passes.h
+++ b/flang/include/flang/Optimizer/HLFIR/Passes.h
@@ -25,7 +25,6 @@ namespace hlfir {
std::unique_ptr<mlir::Pass> createConvertHLFIRtoFIRPass();
std::unique_ptr<mlir::Pass> createBufferizeHLFIRPass();
std::unique_ptr<mlir::Pass> createLowerHLFIRIntrinsicsPass();
-std::unique_ptr<mlir::Pass> createInlineElementalsPass();
std::unique_ptr<mlir::Pass> createLowerHLFIROrderedAssignmentsPass();
std::unique_ptr<mlir::Pass> createOptimizedBufferizationPass();
diff --git a/flang/include/flang/Optimizer/HLFIR/Passes.td b/flang/include/flang/Optimizer/HLFIR/Passes.td
index 806d1f202975..0d4496a44c20 100644
--- a/flang/include/flang/Optimizer/HLFIR/Passes.td
+++ b/flang/include/flang/Optimizer/HLFIR/Passes.td
@@ -50,9 +50,8 @@ def SimplifyHLFIRIntrinsics : Pass<"simplify-hlfir-intrinsics"> {
let summary = "Simplify HLFIR intrinsic operations that don't need to result in runtime calls";
}
-def InlineElementals : Pass<"inline-elementals", "::mlir::func::FuncOp"> {
+def InlineElementals : Pass<"inline-elementals"> {
let summary = "Inline chained hlfir.elemental operations";
- let constructor = "hlfir::createInlineElementalsPass()";
}
#endif //FORTRAN_DIALECT_HLFIR_PASSES
diff --git a/flang/include/flang/Semantics/openmp-directive-sets.h b/flang/include/flang/Semantics/openmp-directive-sets.h
index 842d251b682a..da66e0eda321 100644
--- a/flang/include/flang/Semantics/openmp-directive-sets.h
+++ b/flang/include/flang/Semantics/openmp-directive-sets.h
@@ -205,9 +205,11 @@ static const OmpDirectiveSet compositeConstructSet{
};
static const OmpDirectiveSet blockConstructSet{
+ Directive::OMPD_masked,
Directive::OMPD_master,
Directive::OMPD_ordered,
Directive::OMPD_parallel,
+ Directive::OMPD_parallel_masked,
Directive::OMPD_parallel_workshare,
Directive::OMPD_single,
Directive::OMPD_target,
diff --git a/flang/include/flang/Tools/CLOptions.inc b/flang/include/flang/Tools/CLOptions.inc
index e0ab9d5f0429..3900b172917e 100644
--- a/flang/include/flang/Tools/CLOptions.inc
+++ b/flang/include/flang/Tools/CLOptions.inc
@@ -320,7 +320,7 @@ inline void createHLFIRToFIRPassPipeline(
addNestedPassToAllTopLevelOperations(
pm, hlfir::createSimplifyHLFIRIntrinsics);
}
- pm.addPass(hlfir::createInlineElementalsPass());
+ addNestedPassToAllTopLevelOperations(pm, hlfir::createInlineElementals);
if (optLevel.isOptimizingForSpeed()) {
addCanonicalizerPassWithoutRegionSimplification(pm);
pm.addPass(mlir::createCSEPass());
diff --git a/flang/lib/Lower/Bridge.cpp b/flang/lib/Lower/Bridge.cpp
index 7ded9adcd5c2..898b37504a6e 100644
--- a/flang/lib/Lower/Bridge.cpp
+++ b/flang/lib/Lower/Bridge.cpp
@@ -57,6 +57,7 @@
#include "flang/Semantics/symbol.h"
#include "flang/Semantics/tools.h"
#include "mlir/Dialect/ControlFlow/IR/ControlFlowOps.h"
+#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/Parser/Parser.h"
#include "mlir/Transforms/RegionUtils.h"
@@ -601,8 +602,7 @@ public:
return typeConstructionStack;
}
- bool
- isPresentShallowLookup(const Fortran::semantics::Symbol &sym) override final {
+ bool isPresentShallowLookup(Fortran::semantics::Symbol &sym) override final {
return bool(shallowLookupSymbol(sym));
}
@@ -1303,6 +1303,43 @@ private:
genBranch(targetEval.block);
}
+  /// A construct contains nested evaluations. Some of these evaluations
+  /// may start a new basic block, while others add code to an existing
+  /// block.
+  /// Collect the list of nested evaluations that are last in their block,
+  /// and organize them into two sets:
+ /// 1. Exiting evaluations: they may need a branch exiting from their
+ /// parent construct,
+ /// 2. Fall-through evaluations: they will continue to the following
+ /// evaluation. They may still need a branch, but they do not exit
+ /// the construct. They appear in cases where the following evaluation
+ /// is a target of some branch.
+ void collectFinalEvaluations(
+ Fortran::lower::pft::Evaluation &construct,
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> &exits,
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> &fallThroughs) {
+ Fortran::lower::pft::EvaluationList &nested =
+ construct.getNestedEvaluations();
+ if (nested.empty())
+ return;
+
+ Fortran::lower::pft::Evaluation *exit = construct.constructExit;
+ Fortran::lower::pft::Evaluation *previous = &nested.front();
+
+ for (auto it = ++nested.begin(), end = nested.end(); it != end;
+ previous = &*it++) {
+ if (it->block == nullptr)
+ continue;
+      // "*it" starts a new block; decide what to do with "previous".
+ if (it->isIntermediateConstructStmt() && previous != exit)
+ exits.push_back(previous);
+ else if (previous->lexicalSuccessor && previous->lexicalSuccessor->block)
+ fallThroughs.push_back(previous);
+ }
+ if (previous != exit)
+ exits.push_back(previous);
+ }
+
/// Generate a SelectOp or branch sequence that compares \p selector against
/// values in \p valueList and targets corresponding labels in \p labelList.
/// If no value matches the selector, branch to \p defaultEval.
@@ -2110,6 +2147,9 @@ private:
}
// Unstructured branch sequence.
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> exits, fallThroughs;
+ collectFinalEvaluations(eval, exits, fallThroughs);
+
for (Fortran::lower::pft::Evaluation &e : eval.getNestedEvaluations()) {
auto genIfBranch = [&](mlir::Value cond) {
if (e.lexicalSuccessor == e.controlSuccessor) // empty block -> exit
@@ -2130,6 +2170,12 @@ private:
genIfBranch(genIfCondition(s));
} else {
genFIR(e);
+ if (blockIsUnterminated()) {
+ if (llvm::is_contained(exits, &e))
+ genConstructExitBranch(*eval.constructExit);
+ else if (llvm::is_contained(fallThroughs, &e))
+ genBranch(e.lexicalSuccessor->block);
+ }
}
}
}
@@ -2138,11 +2184,21 @@ private:
Fortran::lower::pft::Evaluation &eval = getEval();
Fortran::lower::StatementContext stmtCtx;
pushActiveConstruct(eval, stmtCtx);
+
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> exits, fallThroughs;
+ collectFinalEvaluations(eval, exits, fallThroughs);
+
for (Fortran::lower::pft::Evaluation &e : eval.getNestedEvaluations()) {
if (e.getIf<Fortran::parser::EndSelectStmt>())
maybeStartBlock(e.block);
else
genFIR(e);
+ if (blockIsUnterminated()) {
+ if (llvm::is_contained(exits, &e))
+ genConstructExitBranch(*eval.constructExit);
+ else if (llvm::is_contained(fallThroughs, &e))
+ genBranch(e.lexicalSuccessor->block);
+ }
}
popActiveConstruct();
}
@@ -3008,6 +3064,10 @@ private:
}
pushActiveConstruct(getEval(), stmtCtx);
+ llvm::SmallVector<Fortran::lower::pft::Evaluation *> exits, fallThroughs;
+ collectFinalEvaluations(getEval(), exits, fallThroughs);
+ Fortran::lower::pft::Evaluation &constructExit = *getEval().constructExit;
+
for (Fortran::lower::pft::Evaluation &eval :
getEval().getNestedEvaluations()) {
setCurrentPosition(eval.position);
@@ -3204,6 +3264,12 @@ private:
} else {
genFIR(eval);
}
+ if (blockIsUnterminated()) {
+ if (llvm::is_contained(exits, &eval))
+ genConstructExitBranch(constructExit);
+ else if (llvm::is_contained(fallThroughs, &eval))
+ genBranch(eval.lexicalSuccessor->block);
+ }
}
popActiveConstruct();
}
@@ -3717,21 +3783,36 @@ private:
hlfir::Entity &lhs, hlfir::Entity &rhs) {
bool lhsIsDevice = Fortran::evaluate::HasCUDAAttrs(assign.lhs);
bool rhsIsDevice = Fortran::evaluate::HasCUDAAttrs(assign.rhs);
- if (rhs.isBoxAddressOrValue() || lhs.isBoxAddressOrValue())
- TODO(loc, "CUDA data transfler with descriptors");
+
+ auto getRefIfLoaded = [](mlir::Value val) -> mlir::Value {
+ if (auto loadOp =
+ mlir::dyn_cast_or_null<fir::LoadOp>(val.getDefiningOp()))
+ return loadOp.getMemref();
+ return val;
+ };
+
+ mlir::Value rhsVal = getRefIfLoaded(rhs.getBase());
+ mlir::Value lhsVal = getRefIfLoaded(lhs.getBase());
// device = host
if (lhsIsDevice && !rhsIsDevice) {
auto transferKindAttr = cuf::DataTransferKindAttr::get(
builder.getContext(), cuf::DataTransferKind::HostDevice);
if (!rhs.isVariable()) {
- auto associate = hlfir::genAssociateExpr(
- loc, builder, rhs, rhs.getType(), ".cuf_host_tmp");
- builder.create<cuf::DataTransferOp>(loc, associate.getBase(), lhs,
- transferKindAttr);
- builder.create<hlfir::EndAssociateOp>(loc, associate);
+ // Special case if the rhs is a constant.
+ if (matchPattern(rhs.getDefiningOp(), mlir::m_Constant())) {
+ builder.create<cuf::DataTransferOp>(loc, rhs, lhsVal,
+ transferKindAttr);
+ } else {
+ auto associate = hlfir::genAssociateExpr(
+ loc, builder, rhs, rhs.getType(), ".cuf_host_tmp");
+ builder.create<cuf::DataTransferOp>(loc, associate.getBase(), lhsVal,
+ transferKindAttr);
+ builder.create<hlfir::EndAssociateOp>(loc, associate);
+ }
} else {
- builder.create<cuf::DataTransferOp>(loc, rhs, lhs, transferKindAttr);
+ builder.create<cuf::DataTransferOp>(loc, rhsVal, lhsVal,
+ transferKindAttr);
}
return;
}
@@ -3740,26 +3821,18 @@ private:
if (!lhsIsDevice && rhsIsDevice) {
auto transferKindAttr = cuf::DataTransferKindAttr::get(
builder.getContext(), cuf::DataTransferKind::DeviceHost);
- if (!rhs.isVariable()) {
- // evaluateRhs loads scalar. Look for the memory reference to be used in
- // the transfer.
- if (mlir::isa_and_nonnull<fir::LoadOp>(rhs.getDefiningOp())) {
- auto loadOp = mlir::dyn_cast<fir::LoadOp>(rhs.getDefiningOp());
- builder.create<cuf::DataTransferOp>(loc, loadOp.getMemref(), lhs,
- transferKindAttr);
- return;
- }
- } else {
- builder.create<cuf::DataTransferOp>(loc, rhs, lhs, transferKindAttr);
- }
+ builder.create<cuf::DataTransferOp>(loc, rhsVal, lhsVal,
+ transferKindAttr);
return;
}
+ // device = device
if (lhsIsDevice && rhsIsDevice) {
assert(rhs.isVariable() && "CUDA Fortran assignment rhs is not legal");
auto transferKindAttr = cuf::DataTransferKindAttr::get(
builder.getContext(), cuf::DataTransferKind::DeviceDevice);
- builder.create<cuf::DataTransferOp>(loc, rhs, lhs, transferKindAttr);
+ builder.create<cuf::DataTransferOp>(loc, rhsVal, lhsVal,
+ transferKindAttr);
return;
}
llvm_unreachable("Unhandled CUDA data transfer");
@@ -4536,28 +4609,6 @@ private:
setCurrentEval(eval);
setCurrentPosition(eval.position);
eval.visit([&](const auto &stmt) { genFIR(stmt); });
-
- // Generate an end-of-block branch for several special cases. For
- // constructs, this can be done for either the end construct statement,
- // or for the construct itself, which will skip this code if the
- // end statement was visited first and generated a branch.
- Fortran::lower::pft::Evaluation *successor = [&]() {
- if (eval.isConstruct() ||
- (eval.isDirective() && eval.hasNestedEvaluations()))
- return eval.getLastNestedEvaluation().lexicalSuccessor;
- return eval.lexicalSuccessor;
- }();
-
- if (successor && blockIsUnterminated()) {
- if (successor->isIntermediateConstructStmt() &&
- successor->parentConstruct->lowerAsUnstructured())
- // Exit from an intermediate unstructured IF or SELECT construct block.
- genBranch(successor->parentConstruct->constructExit->block);
- else if (unstructuredContext && eval.isConstructStmt() &&
- successor == eval.controlSuccessor)
- // Exit from a degenerate, empty construct block.
- genBranch(eval.parentConstruct->constructExit->block);
- }
}
/// Map mlir function block arguments to the corresponding Fortran dummy
diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
index 6a8c3e3ac9ba..2bdc523bf371 100644
--- a/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
+++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.cpp
@@ -29,10 +29,9 @@ void DataSharingProcessor::processStep1(
collectSymbolsForPrivatization();
collectDefaultSymbols();
collectImplicitSymbols();
- collectPreDeterminedSymbols();
-
privatize(clauseOps, privateSyms);
-
+ defaultPrivatize(clauseOps, privateSyms);
+ implicitPrivatize(clauseOps, privateSyms);
insertBarrier();
}
@@ -58,7 +57,7 @@ void DataSharingProcessor::processStep2(mlir::Operation *op, bool isLoop) {
}
void DataSharingProcessor::insertDeallocs() {
- for (const semantics::Symbol *sym : allPrivatizedSymbols)
+ for (const semantics::Symbol *sym : privatizedSymbols)
if (semantics::IsAllocatable(sym->GetUltimate())) {
if (!useDelayedPrivatization) {
converter.createHostAssociateVarCloneDealloc(*sym);
@@ -93,6 +92,10 @@ void DataSharingProcessor::insertDeallocs() {
}
void DataSharingProcessor::cloneSymbol(const semantics::Symbol *sym) {
+  // Privatization of pre-determined symbols (like loop index variables)
+  // happens separately; everything else is privatized here.
+ if (sym->test(semantics::Symbol::Flag::OmpPreDetermined))
+ return;
bool success = converter.createHostAssociateVarClone(*sym);
(void)success;
assert(success && "Privatization failed due to existing binding");
@@ -123,24 +126,20 @@ void DataSharingProcessor::collectSymbolsForPrivatization() {
for (const omp::Clause &clause : clauses) {
if (const auto &privateClause =
std::get_if<omp::clause::Private>(&clause.u)) {
- collectOmpObjectListSymbol(privateClause->v, explicitlyPrivatizedSymbols);
+ collectOmpObjectListSymbol(privateClause->v, privatizedSymbols);
} else if (const auto &firstPrivateClause =
std::get_if<omp::clause::Firstprivate>(&clause.u)) {
- collectOmpObjectListSymbol(firstPrivateClause->v,
- explicitlyPrivatizedSymbols);
+ collectOmpObjectListSymbol(firstPrivateClause->v, privatizedSymbols);
} else if (const auto &lastPrivateClause =
std::get_if<omp::clause::Lastprivate>(&clause.u)) {
const ObjectList &objects = std::get<ObjectList>(lastPrivateClause->t);
- collectOmpObjectListSymbol(objects, explicitlyPrivatizedSymbols);
+ collectOmpObjectListSymbol(objects, privatizedSymbols);
hasLastPrivateOp = true;
} else if (std::get_if<omp::clause::Collapse>(&clause.u)) {
hasCollapse = true;
}
}
- for (auto *sym : explicitlyPrivatizedSymbols)
- allPrivatizedSymbols.insert(sym);
-
if (hasCollapse && hasLastPrivateOp)
TODO(converter.getCurrentLocation(), "Collapse clause with lastprivate");
}
@@ -150,7 +149,7 @@ bool DataSharingProcessor::needBarrier() {
// initialization of firstprivate variables and post-update of lastprivate
// variables.
// Emit implicit barrier for linear clause. Maybe on somewhere else.
- for (const semantics::Symbol *sym : allPrivatizedSymbols) {
+ for (const semantics::Symbol *sym : privatizedSymbols) {
if (sym->test(semantics::Symbol::Flag::OmpFirstPrivate) &&
sym->test(semantics::Symbol::Flag::OmpLastPrivate))
return true;
@@ -226,7 +225,7 @@ void DataSharingProcessor::insertLastPrivateCompare(mlir::Operation *op) {
auto ifOp = firOpBuilder.create<fir::IfOp>(loc, cmpOp, /*else*/ false);
firOpBuilder.setInsertionPointToStart(&ifOp.getThenRegion().front());
assert(loopIV && "loopIV was not set");
- firOpBuilder.create<fir::StoreOp>(loopOp.getLoc(), v, loopIV);
+ firOpBuilder.createStoreWithConvert(loc, v, loopIV);
lastPrivIP = firOpBuilder.saveInsertionPoint();
} else if (mlir::isa<mlir::omp::SectionsOp>(op)) {
// Already handled by genOMP()
@@ -284,40 +283,10 @@ void DataSharingProcessor::collectSymbolsInNestedRegions(
if (nestedEval.isConstruct())
// Recursively look for OpenMP constructs within `nestedEval`'s region
collectSymbolsInNestedRegions(nestedEval, flag, symbolsInNestedRegions);
- else {
- bool isOrderedConstruct = [&]() {
- if (auto *ompConstruct =
- nestedEval.getIf<parser::OpenMPConstruct>()) {
- if (auto *ompBlockConstruct =
- std::get_if<parser::OpenMPBlockConstruct>(
- &ompConstruct->u)) {
- const auto &beginBlockDirective =
- std::get<parser::OmpBeginBlockDirective>(
- ompBlockConstruct->t);
- const auto origDirective =
- std::get<parser::OmpBlockDirective>(beginBlockDirective.t).v;
-
- return origDirective == llvm::omp::Directive::OMPD_ordered;
- }
- }
-
- return false;
- }();
-
- bool isCriticalConstruct = [&]() {
- if (auto *ompConstruct =
- nestedEval.getIf<parser::OpenMPConstruct>()) {
- return std::get_if<parser::OpenMPCriticalConstruct>(
- &ompConstruct->u) != nullptr;
- }
- return false;
- }();
-
- if (!isOrderedConstruct && !isCriticalConstruct)
- converter.collectSymbolSet(nestedEval, symbolsInNestedRegions, flag,
- /*collectSymbols=*/true,
- /*collectHostAssociatedSymbols=*/false);
- }
+ else
+ converter.collectSymbolSet(nestedEval, symbolsInNestedRegions, flag,
+ /*collectSymbols=*/true,
+ /*collectHostAssociatedSymbols=*/false);
}
}
}
@@ -353,39 +322,24 @@ void DataSharingProcessor::collectSymbols(
converter.collectSymbolSet(eval, allSymbols, flag,
/*collectSymbols=*/true,
/*collectHostAssociatedSymbols=*/true);
-
llvm::SetVector<const semantics::Symbol *> symbolsInNestedRegions;
collectSymbolsInNestedRegions(eval, flag, symbolsInNestedRegions);
// Filter-out symbols that must not be privatized.
bool collectImplicit = flag == semantics::Symbol::Flag::OmpImplicit;
- bool collectPreDetermined = flag == semantics::Symbol::Flag::OmpPreDetermined;
-
auto isPrivatizable = [](const semantics::Symbol &sym) -> bool {
return !semantics::IsProcedure(sym) &&
!sym.GetUltimate().has<semantics::DerivedTypeDetails>() &&
!sym.GetUltimate().has<semantics::NamelistDetails>() &&
!semantics::IsImpliedDoIndex(sym.GetUltimate());
};
-
- auto shouldCollectSymbol = [&](const semantics::Symbol *sym) {
- if (collectImplicit)
- return sym->test(semantics::Symbol::Flag::OmpImplicit);
-
- if (collectPreDetermined)
- return sym->test(semantics::Symbol::Flag::OmpPreDetermined);
-
- return !sym->test(semantics::Symbol::Flag::OmpImplicit) &&
- !sym->test(semantics::Symbol::Flag::OmpPreDetermined);
- };
-
for (const auto *sym : allSymbols) {
assert(curScope && "couldn't find current scope");
if (isPrivatizable(*sym) && !symbolsInNestedRegions.contains(sym) &&
- !explicitlyPrivatizedSymbols.contains(sym) &&
- shouldCollectSymbol(sym) && clauseScopes.contains(&sym->owner())) {
- allPrivatizedSymbols.insert(sym);
+ !privatizedSymbols.contains(sym) &&
+ !sym->test(semantics::Symbol::Flag::OmpPreDetermined) &&
+ (collectImplicit || !sym->test(semantics::Symbol::Flag::OmpImplicit)) &&
+ clauseScopes.contains(&sym->owner()))
symbols.insert(sym);
- }
}
}
@@ -409,16 +363,10 @@ void DataSharingProcessor::collectImplicitSymbols() {
collectSymbols(semantics::Symbol::Flag::OmpImplicit, implicitSymbols);
}
-void DataSharingProcessor::collectPreDeterminedSymbols() {
- if (shouldCollectPreDeterminedSymbols)
- collectSymbols(semantics::Symbol::Flag::OmpPreDetermined,
- preDeterminedSymbols);
-}
-
void DataSharingProcessor::privatize(
mlir::omp::PrivateClauseOps *clauseOps,
llvm::SmallVectorImpl<const semantics::Symbol *> *privateSyms) {
- for (const semantics::Symbol *sym : allPrivatizedSymbols) {
+ for (const semantics::Symbol *sym : privatizedSymbols) {
if (const auto *commonDet =
sym->detailsIf<semantics::CommonBlockDetails>()) {
for (const auto &mem : commonDet->objects())
@@ -430,7 +378,7 @@ void DataSharingProcessor::privatize(
void DataSharingProcessor::copyLastPrivatize(mlir::Operation *op) {
insertLastPrivateCompare(op);
- for (const semantics::Symbol *sym : allPrivatizedSymbols)
+ for (const semantics::Symbol *sym : privatizedSymbols)
if (const auto *commonDet =
sym->detailsIf<semantics::CommonBlockDetails>()) {
for (const auto &mem : commonDet->objects()) {
@@ -441,6 +389,20 @@ void DataSharingProcessor::copyLastPrivatize(mlir::Operation *op) {
}
}
+void DataSharingProcessor::defaultPrivatize(
+ mlir::omp::PrivateClauseOps *clauseOps,
+ llvm::SmallVectorImpl<const semantics::Symbol *> *privateSyms) {
+ for (const semantics::Symbol *sym : defaultSymbols)
+ doPrivatize(sym, clauseOps, privateSyms);
+}
+
+void DataSharingProcessor::implicitPrivatize(
+ mlir::omp::PrivateClauseOps *clauseOps,
+ llvm::SmallVectorImpl<const semantics::Symbol *> *privateSyms) {
+ for (const semantics::Symbol *sym : implicitSymbols)
+ doPrivatize(sym, clauseOps, privateSyms);
+}
+
void DataSharingProcessor::doPrivatize(
const semantics::Symbol *sym, mlir::omp::PrivateClauseOps *clauseOps,
llvm::SmallVectorImpl<const semantics::Symbol *> *privateSyms) {
diff --git a/flang/lib/Lower/OpenMP/DataSharingProcessor.h b/flang/lib/Lower/OpenMP/DataSharingProcessor.h
index 9ec5304eb69d..111266eeb784 100644
--- a/flang/lib/Lower/OpenMP/DataSharingProcessor.h
+++ b/flang/lib/Lower/OpenMP/DataSharingProcessor.h
@@ -37,12 +37,9 @@ private:
mlir::OpBuilder::InsertPoint insPt;
mlir::Value loopIV;
// Symbols in private, firstprivate, and/or lastprivate clauses.
- llvm::SetVector<const semantics::Symbol *> explicitlyPrivatizedSymbols;
+ llvm::SetVector<const semantics::Symbol *> privatizedSymbols;
llvm::SetVector<const semantics::Symbol *> defaultSymbols;
llvm::SetVector<const semantics::Symbol *> implicitSymbols;
- llvm::SetVector<const semantics::Symbol *> preDeterminedSymbols;
- llvm::SetVector<const semantics::Symbol *> allPrivatizedSymbols;
-
llvm::DenseMap<const semantics::Symbol *, mlir::omp::PrivateClauseOp>
symToPrivatizer;
lower::AbstractConverter &converter;
@@ -50,7 +47,6 @@ private:
fir::FirOpBuilder &firOpBuilder;
omp::List<omp::Clause> clauses;
lower::pft::Evaluation &eval;
- bool shouldCollectPreDeterminedSymbols;
bool useDelayedPrivatization;
lower::SymMap *symTable;
@@ -67,7 +63,6 @@ private:
void insertBarrier();
void collectDefaultSymbols();
void collectImplicitSymbols();
- void collectPreDeterminedSymbols();
void privatize(mlir::omp::PrivateClauseOps *clauseOps,
llvm::SmallVectorImpl<const semantics::Symbol *> *privateSyms);
void defaultPrivatize(
@@ -95,12 +90,10 @@ public:
semantics::SemanticsContext &semaCtx,
const List<Clause> &clauses,
lower::pft::Evaluation &eval,
- bool shouldCollectPreDeterminedSymbols,
bool useDelayedPrivatization = false,
lower::SymMap *symTable = nullptr)
: hasLastPrivateOp(false), converter(converter), semaCtx(semaCtx),
firOpBuilder(converter.getFirOpBuilder()), clauses(clauses), eval(eval),
- shouldCollectPreDeterminedSymbols(shouldCollectPreDeterminedSymbols),
useDelayedPrivatization(useDelayedPrivatization), symTable(symTable) {}
// Privatisation is split into two steps.
diff --git a/flang/lib/Lower/OpenMP/Decomposer.cpp b/flang/lib/Lower/OpenMP/Decomposer.cpp
index 66e4028c7a28..e6897cb81e94 100644
--- a/flang/lib/Lower/OpenMP/Decomposer.cpp
+++ b/flang/lib/Lower/OpenMP/Decomposer.cpp
@@ -123,9 +123,4 @@ ConstructQueue buildConstructQueue(
return constructs;
}
-
-bool isLastItemInQueue(ConstructQueue::iterator item,
- const ConstructQueue &queue) {
- return std::next(item) == queue.end();
-}
} // namespace Fortran::lower::omp
diff --git a/flang/lib/Lower/OpenMP/Decomposer.h b/flang/lib/Lower/OpenMP/Decomposer.h
index a7851d8534e5..f42d8f5c1740 100644
--- a/flang/lib/Lower/OpenMP/Decomposer.h
+++ b/flang/lib/Lower/OpenMP/Decomposer.h
@@ -46,9 +46,6 @@ ConstructQueue buildConstructQueue(mlir::ModuleOp modOp,
const parser::CharBlock &source,
llvm::omp::Directive compound,
const List<Clause> &clauses);
-
-bool isLastItemInQueue(ConstructQueue::iterator item,
- const ConstructQueue &queue);
} // namespace Fortran::lower::omp
#endif // FORTRAN_LOWER_OPENMP_DECOMPOSER_H
diff --git a/flang/lib/Lower/OpenMP/OpenMP.cpp b/flang/lib/Lower/OpenMP/OpenMP.cpp
index ece098a5bfbb..17b362cc2f32 100644
--- a/flang/lib/Lower/OpenMP/OpenMP.cpp
+++ b/flang/lib/Lower/OpenMP/OpenMP.cpp
@@ -215,10 +215,12 @@ createAndSetPrivatizedLoopVar(lower::AbstractConverter &converter,
firOpBuilder.setInsertionPointToStart(firOpBuilder.getAllocaBlock());
mlir::Type tempTy = converter.genType(*sym);
-
- assert(converter.isPresentShallowLookup(*sym) &&
- "Expected symbol to be in symbol table.");
-
+ mlir::Value temp = firOpBuilder.create<fir::AllocaOp>(
+ loc, tempTy, /*pinned=*/true, /*lengthParams=*/mlir::ValueRange{},
+ /*shapeParams*/ mlir::ValueRange{},
+ llvm::ArrayRef<mlir::NamedAttribute>{
+ fir::getAdaptToByRefAttr(firOpBuilder)});
+ converter.bindSymbol(*sym, temp);
firOpBuilder.restoreInsertionPoint(insPt);
mlir::Value cvtVal = firOpBuilder.createConvert(loc, tempTy, indexVal);
mlir::Operation *storeOp = firOpBuilder.create<fir::StoreOp>(
@@ -578,8 +580,7 @@ static void createBodyOfOp(mlir::Operation &op, const OpWithBodyGenInfo &info,
std::optional<DataSharingProcessor> tempDsp;
if (privatize) {
if (!info.dsp) {
- tempDsp.emplace(info.converter, info.semaCtx, *info.clauses, info.eval,
- Fortran::lower::omp::isLastItemInQueue(item, queue));
+ tempDsp.emplace(info.converter, info.semaCtx, *info.clauses, info.eval);
tempDsp->processStep1();
}
}
@@ -1315,7 +1316,6 @@ genParallelOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
bool privatize = !outerCombined;
DataSharingProcessor dsp(converter, semaCtx, item->clauses, eval,
- lower::omp::isLastItemInQueue(item, queue),
/*useDelayedPrivatization=*/true, &symTable);
if (privatize)
@@ -1388,8 +1388,7 @@ genSectionsOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
// Insert privatizations before SECTIONS
symTable.pushScope();
- DataSharingProcessor dsp(converter, semaCtx, item->clauses, eval,
- lower::omp::isLastItemInQueue(item, queue));
+ DataSharingProcessor dsp(converter, semaCtx, item->clauses, eval);
dsp.processStep1();
List<Clause> nonDsaClauses;
@@ -1459,9 +1458,7 @@ genSimdOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
mlir::Location loc, const ConstructQueue &queue,
ConstructQueue::iterator item) {
fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
- symTable.pushScope();
- DataSharingProcessor dsp(converter, semaCtx, item->clauses, eval,
- lower::omp::isLastItemInQueue(item, queue));
+ DataSharingProcessor dsp(converter, semaCtx, item->clauses, eval);
dsp.processStep1();
lower::StatementContext stmtCtx;
@@ -1499,7 +1496,6 @@ genSimdOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
.setGenRegionEntryCb(ivCallback),
queue, item);
- symTable.popScope();
return simdOp;
}
@@ -1765,9 +1761,7 @@ genWsloopOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
mlir::Location loc, const ConstructQueue &queue,
ConstructQueue::iterator item) {
fir::FirOpBuilder &firOpBuilder = converter.getFirOpBuilder();
- symTable.pushScope();
- DataSharingProcessor dsp(converter, semaCtx, item->clauses, eval,
- lower::omp::isLastItemInQueue(item, queue));
+ DataSharingProcessor dsp(converter, semaCtx, item->clauses, eval);
dsp.processStep1();
lower::StatementContext stmtCtx;
@@ -1810,7 +1804,6 @@ genWsloopOp(lower::AbstractConverter &converter, lower::SymMap &symTable,
.setReductions(&reductionSyms, &reductionTypes)
.setGenRegionEntryCb(ivCallback),
queue, item);
- symTable.popScope();
return wsloopOp;
}
@@ -1899,8 +1892,7 @@ static void genOMPDispatch(lower::AbstractConverter &converter,
break;
case llvm::omp::Directive::OMPD_loop:
case llvm::omp::Directive::OMPD_masked:
- TODO(loc, "Unhandled loop directive (" +
- llvm::omp::getOpenMPDirectiveName(dir) + ")");
+ TODO(loc, "Unhandled directive " + llvm::omp::getOpenMPDirectiveName(dir));
break;
case llvm::omp::Directive::OMPD_master:
genMasterOp(converter, symTable, semaCtx, eval, loc, queue, item);
diff --git a/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt b/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt
index 81db40f3ba46..ec5484c1d610 100644
--- a/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt
+++ b/flang/lib/Optimizer/Dialect/CUF/Attributes/CMakeLists.txt
@@ -5,6 +5,7 @@ add_flang_library(CUFAttrs
DEPENDS
MLIRIR
CUFAttrsIncGen
+ CUFOpsIncGen
LINK_LIBS
MLIRTargetLLVMIRExport
diff --git a/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp b/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp
index 870652c72fab..2c0c4c2cfae3 100644
--- a/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp
+++ b/flang/lib/Optimizer/Dialect/CUF/CUFOps.cpp
@@ -90,6 +90,24 @@ mlir::LogicalResult cuf::AllocateOp::verify() {
}
//===----------------------------------------------------------------------===//
+// DataTransferOp
+//===----------------------------------------------------------------------===//
+
+mlir::LogicalResult cuf::DataTransferOp::verify() {
+ mlir::Type srcTy = getSrc().getType();
+ mlir::Type dstTy = getDst().getType();
+ if ((fir::isa_ref_type(srcTy) && fir::isa_ref_type(dstTy)) ||
+ (fir::isa_box_type(srcTy) && fir::isa_box_type(dstTy)))
+ return mlir::success();
+ if (fir::isa_trivial(srcTy) &&
+ matchPattern(getSrc().getDefiningOp(), mlir::m_Constant()))
+ return mlir::success();
+ return emitOpError()
+ << "expect src and dst to be both references or descriptors or src to "
+ "be a constant";
+}
+
+//===----------------------------------------------------------------------===//
// DeallocateOp
//===----------------------------------------------------------------------===//
diff --git a/flang/lib/Optimizer/HLFIR/Transforms/InlineElementals.cpp b/flang/lib/Optimizer/HLFIR/Transforms/InlineElementals.cpp
index a99038fdfba9..06d051876384 100644
--- a/flang/lib/Optimizer/HLFIR/Transforms/InlineElementals.cpp
+++ b/flang/lib/Optimizer/HLFIR/Transforms/InlineElementals.cpp
@@ -115,7 +115,6 @@ class InlineElementalsPass
: public hlfir::impl::InlineElementalsBase<InlineElementalsPass> {
public:
void runOnOperation() override {
- mlir::func::FuncOp func = getOperation();
mlir::MLIRContext *context = &getContext();
mlir::GreedyRewriteConfig config;
@@ -126,14 +125,11 @@ public:
patterns.insert<InlineElementalConversion>(context);
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(
- func, std::move(patterns), config))) {
- mlir::emitError(func->getLoc(), "failure in HLFIR elemental inlining");
+ getOperation(), std::move(patterns), config))) {
+ mlir::emitError(getOperation()->getLoc(),
+ "failure in HLFIR elemental inlining");
signalPassFailure();
}
}
};
} // namespace
-
-std::unique_ptr<mlir::Pass> hlfir::createInlineElementalsPass() {
- return std::make_unique<InlineElementalsPass>();
-}
diff --git a/flang/lib/Parser/openmp-parsers.cpp b/flang/lib/Parser/openmp-parsers.cpp
index 48f213794247..e67dbcca30e7 100644
--- a/flang/lib/Parser/openmp-parsers.cpp
+++ b/flang/lib/Parser/openmp-parsers.cpp
@@ -266,6 +266,8 @@ TYPE_PARSER(
construct<OmpClause>(construct<OmpClause::DynamicAllocators>()) ||
"ENTER" >> construct<OmpClause>(construct<OmpClause::Enter>(
parenthesized(Parser<OmpObjectList>{}))) ||
+ "FILTER" >> construct<OmpClause>(construct<OmpClause::Filter>(
+ parenthesized(scalarIntExpr))) ||
"FINAL" >> construct<OmpClause>(construct<OmpClause::Final>(
parenthesized(scalarLogicalExpr))) ||
"FULL" >> construct<OmpClause>(construct<OmpClause::Full>()) ||
@@ -376,8 +378,15 @@ TYPE_PARSER(sourced(construct<OmpLoopDirective>(first(
"DISTRIBUTE" >> pure(llvm::omp::Directive::OMPD_distribute),
"DO SIMD" >> pure(llvm::omp::Directive::OMPD_do_simd),
"DO" >> pure(llvm::omp::Directive::OMPD_do),
+ "MASKED TASKLOOP SIMD" >>
+ pure(llvm::omp::Directive::OMPD_masked_taskloop_simd),
+ "MASKED TASKLOOP" >> pure(llvm::omp::Directive::OMPD_masked_taskloop),
"PARALLEL DO SIMD" >> pure(llvm::omp::Directive::OMPD_parallel_do_simd),
"PARALLEL DO" >> pure(llvm::omp::Directive::OMPD_parallel_do),
+ "PARALLEL MASKED TASKLOOP SIMD" >>
+ pure(llvm::omp::Directive::OMPD_parallel_masked_taskloop_simd),
+ "PARALLEL MASKED TASKLOOP" >>
+ pure(llvm::omp::Directive::OMPD_parallel_masked_taskloop),
"SIMD" >> pure(llvm::omp::Directive::OMPD_simd),
"TARGET PARALLEL DO SIMD" >>
pure(llvm::omp::Directive::OMPD_target_parallel_do_simd),
@@ -487,8 +496,10 @@ TYPE_PARSER(
// Directives enclosing structured-block
TYPE_PARSER(construct<OmpBlockDirective>(first(
+ "MASKED" >> pure(llvm::omp::Directive::OMPD_masked),
"MASTER" >> pure(llvm::omp::Directive::OMPD_master),
"ORDERED" >> pure(llvm::omp::Directive::OMPD_ordered),
+ "PARALLEL MASKED" >> pure(llvm::omp::Directive::OMPD_parallel_masked),
"PARALLEL WORKSHARE" >> pure(llvm::omp::Directive::OMPD_parallel_workshare),
"PARALLEL" >> pure(llvm::omp::Directive::OMPD_parallel),
"SINGLE" >> pure(llvm::omp::Directive::OMPD_single),
diff --git a/flang/lib/Parser/unparse.cpp b/flang/lib/Parser/unparse.cpp
index 1639e900903f..bdd968b19a43 100644
--- a/flang/lib/Parser/unparse.cpp
+++ b/flang/lib/Parser/unparse.cpp
@@ -2194,12 +2194,24 @@ public:
case llvm::omp::Directive::OMPD_do_simd:
Word("DO SIMD ");
break;
+ case llvm::omp::Directive::OMPD_masked_taskloop_simd:
+ Word("MASKED TASKLOOP SIMD");
+ break;
+ case llvm::omp::Directive::OMPD_masked_taskloop:
+ Word("MASKED TASKLOOP");
+ break;
case llvm::omp::Directive::OMPD_parallel_do:
Word("PARALLEL DO ");
break;
case llvm::omp::Directive::OMPD_parallel_do_simd:
Word("PARALLEL DO SIMD ");
break;
+ case llvm::omp::Directive::OMPD_parallel_masked_taskloop_simd:
+ Word("PARALLEL MASKED TASKLOOP SIMD");
+ break;
+ case llvm::omp::Directive::OMPD_parallel_masked_taskloop:
+ Word("PARALLEL MASKED TASKLOOP");
+ break;
case llvm::omp::Directive::OMPD_simd:
Word("SIMD ");
break;
@@ -2283,12 +2295,18 @@ public:
}
void Unparse(const OmpBlockDirective &x) {
switch (x.v) {
+ case llvm::omp::Directive::OMPD_masked:
+ Word("MASKED");
+ break;
case llvm::omp::Directive::OMPD_master:
Word("MASTER");
break;
case llvm::omp::Directive::OMPD_ordered:
Word("ORDERED ");
break;
+ case llvm::omp::Directive::OMPD_parallel_masked:
+ Word("PARALLEL MASKED");
+ break;
case llvm::omp::Directive::OMPD_parallel_workshare:
Word("PARALLEL WORKSHARE ");
break;
diff --git a/flang/lib/Semantics/resolve-directives.cpp b/flang/lib/Semantics/resolve-directives.cpp
index 2add2056f658..dbc531372c3f 100644
--- a/flang/lib/Semantics/resolve-directives.cpp
+++ b/flang/lib/Semantics/resolve-directives.cpp
@@ -1503,6 +1503,8 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPBlockConstruct &x) {
const auto &beginBlockDir{std::get<parser::OmpBeginBlockDirective>(x.t)};
const auto &beginDir{std::get<parser::OmpBlockDirective>(beginBlockDir.t)};
switch (beginDir.v) {
+ case llvm::omp::Directive::OMPD_masked:
+ case llvm::omp::Directive::OMPD_parallel_masked:
case llvm::omp::Directive::OMPD_master:
case llvm::omp::Directive::OMPD_ordered:
case llvm::omp::Directive::OMPD_parallel:
@@ -1532,6 +1534,8 @@ void OmpAttributeVisitor::Post(const parser::OpenMPBlockConstruct &x) {
const auto &beginBlockDir{std::get<parser::OmpBeginBlockDirective>(x.t)};
const auto &beginDir{std::get<parser::OmpBlockDirective>(beginBlockDir.t)};
switch (beginDir.v) {
+ case llvm::omp::Directive::OMPD_masked:
+ case llvm::omp::Directive::OMPD_parallel_masked:
case llvm::omp::Directive::OMPD_parallel:
case llvm::omp::Directive::OMPD_single:
case llvm::omp::Directive::OMPD_target:
@@ -1598,8 +1602,12 @@ bool OmpAttributeVisitor::Pre(const parser::OpenMPLoopConstruct &x) {
case llvm::omp::Directive::OMPD_distribute_simd:
case llvm::omp::Directive::OMPD_do:
case llvm::omp::Directive::OMPD_do_simd:
+ case llvm::omp::Directive::OMPD_masked_taskloop_simd:
+ case llvm::omp::Directive::OMPD_masked_taskloop:
case llvm::omp::Directive::OMPD_parallel_do:
case llvm::omp::Directive::OMPD_parallel_do_simd:
+ case llvm::omp::Directive::OMPD_parallel_masked_taskloop_simd:
+ case llvm::omp::Directive::OMPD_parallel_masked_taskloop:
case llvm::omp::Directive::OMPD_simd:
case llvm::omp::Directive::OMPD_target_parallel_do:
case llvm::omp::Directive::OMPD_target_parallel_do_simd:
diff --git a/flang/runtime/CMakeLists.txt b/flang/runtime/CMakeLists.txt
index 4f7627eac81f..4c2afd0abe90 100644
--- a/flang/runtime/CMakeLists.txt
+++ b/flang/runtime/CMakeLists.txt
@@ -199,6 +199,7 @@ set(supported_files
inquiry.cpp
internal-unit.cpp
io-api.cpp
+ io-api-minimal.cpp
io-error.cpp
io-stmt.cpp
iostat.cpp
diff --git a/flang/runtime/edit-output.cpp b/flang/runtime/edit-output.cpp
index 13ab91fc56ea..6b24c5648318 100644
--- a/flang/runtime/edit-output.cpp
+++ b/flang/runtime/edit-output.cpp
@@ -832,8 +832,11 @@ RT_API_ATTRS bool EditLogicalOutput(
reinterpret_cast<const unsigned char *>(&truth), sizeof truth);
case 'A': { // legacy extension
int truthBits{truth};
- return EditCharacterOutput(
- io, edit, reinterpret_cast<char *>(&truthBits), sizeof truthBits);
+ int len{sizeof truthBits};
+ int width{edit.width.value_or(len)};
+ return EmitRepeated(io, ' ', std::max(0, width - len)) &&
+ EmitEncoded(
+ io, reinterpret_cast<char *>(&truthBits), std::min(width, len));
}
default:
io.GetIoErrorHandler().SignalError(IostatErrorInFormat,
diff --git a/flang/test/Driver/mlir-debug-pass-pipeline.f90 b/flang/test/Driver/mlir-debug-pass-pipeline.f90
index a9980e3c932c..e555ce735853 100644
--- a/flang/test/Driver/mlir-debug-pass-pipeline.f90
+++ b/flang/test/Driver/mlir-debug-pass-pipeline.f90
@@ -25,8 +25,15 @@ end program
! ALL: Pass statistics report
! ALL: Fortran::lower::VerifierPass
+! ALL-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
+! ALL-NEXT: 'fir.global' Pipeline
+! ALL-NEXT: InlineElementals
! ALL-NEXT: 'func.func' Pipeline
! ALL-NEXT: InlineElementals
+! ALL-NEXT: 'omp.declare_reduction' Pipeline
+! ALL-NEXT: InlineElementals
+! ALL-NEXT: 'omp.private' Pipeline
+! ALL-NEXT: InlineElementals
! ALL-NEXT: LowerHLFIROrderedAssignments
! ALL-NEXT: LowerHLFIRIntrinsics
! ALL-NEXT: BufferizeHLFIR
diff --git a/flang/test/Driver/mlir-pass-pipeline.f90 b/flang/test/Driver/mlir-pass-pipeline.f90
index 7130024e43b9..6d0e6c3bdcce 100644
--- a/flang/test/Driver/mlir-pass-pipeline.f90
+++ b/flang/test/Driver/mlir-pass-pipeline.f90
@@ -13,16 +13,25 @@ end program
! ALL: Fortran::lower::VerifierPass
! O2-NEXT: Canonicalizer
-! O2-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
-! O2-NEXT: 'fir.global' Pipeline
+! ALL: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
+! ALL-NEXT:'fir.global' Pipeline
! O2-NEXT: SimplifyHLFIRIntrinsics
-! ALL: 'func.func' Pipeline
+! ALL: InlineElementals
+! ALL-NEXT:'func.func' Pipeline
! O2-NEXT: SimplifyHLFIRIntrinsics
! ALL: InlineElementals
-! O2-NEXT: 'omp.declare_reduction' Pipeline
+! ALL-NEXT:'omp.declare_reduction' Pipeline
! O2-NEXT: SimplifyHLFIRIntrinsics
-! O2-NEXT: 'omp.private' Pipeline
+! ALL: InlineElementals
+! ALL-NEXT:'omp.private' Pipeline
! O2-NEXT: SimplifyHLFIRIntrinsics
+! ALL: InlineElementals
+! O2-NEXT: Canonicalizer
+! O2-NEXT: CSE
+! O2-NEXT: (S) {{.*}} num-cse'd
+! O2-NEXT: (S) {{.*}} num-dce'd
+! O2-NEXT: 'func.func' Pipeline
+! O2-NEXT: OptimizedBufferization
! ALL: LowerHLFIROrderedAssignments
! ALL-NEXT: LowerHLFIRIntrinsics
! ALL-NEXT: BufferizeHLFIR
diff --git a/flang/test/Fir/basic-program.fir b/flang/test/Fir/basic-program.fir
index 9e3d3c18337d..42bceb66668d 100644
--- a/flang/test/Fir/basic-program.fir
+++ b/flang/test/Fir/basic-program.fir
@@ -20,13 +20,16 @@ func.func @_QQmain() {
// PASSES-NEXT: Pipeline Collection : ['fir.global', 'func.func', 'omp.declare_reduction', 'omp.private']
// PASSES-NEXT: 'fir.global' Pipeline
// PASSES-NEXT: SimplifyHLFIRIntrinsics
+// PASSES-NEXT: InlineElementals
// PASSES-NEXT: 'func.func' Pipeline
// PASSES-NEXT: SimplifyHLFIRIntrinsics
// PASSES-NEXT: InlineElementals
// PASSES-NEXT: 'omp.declare_reduction' Pipeline
// PASSES-NEXT: SimplifyHLFIRIntrinsics
+// PASSES-NEXT: InlineElementals
// PASSES-NEXT: 'omp.private' Pipeline
// PASSES-NEXT: SimplifyHLFIRIntrinsics
+// PASSES-NEXT: InlineElementals
// PASSES-NEXT: Canonicalizer
// PASSES-NEXT: CSE
// PASSES-NEXT: (S) 0 num-cse'd - Number of operations CSE'd
diff --git a/flang/test/Lower/CUDA/cuda-data-transfer.cuf b/flang/test/Lower/CUDA/cuda-data-transfer.cuf
index 084314ed63ec..42fa4d09c95e 100644
--- a/flang/test/Lower/CUDA/cuda-data-transfer.cuf
+++ b/flang/test/Lower/CUDA/cuda-data-transfer.cuf
@@ -25,6 +25,8 @@ subroutine sub1()
adev = ahost + bhost
+ adev = 10
+
end
! CHECK-LABEL: func.func @_QPsub1()
@@ -41,10 +43,7 @@ end
! CHECK: cuf.data_transfer %[[ASSOC]]#0 to %[[M]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<i32>, !fir.ref<i32>
! CHECK: hlfir.end_associate %[[ASSOC]]#1, %[[ASSOC]]#2 : !fir.ref<i32>, i1
-! CHECK: %[[C1:.*]] = arith.constant 1 : i32
-! CHECK: %[[ASSOC:.*]]:3 = hlfir.associate %[[C1]] {uniq_name = ".cuf_host_tmp"} : (i32) -> (!fir.ref<i32>, !fir.ref<i32>, i1)
-! CHECK: cuf.data_transfer %[[ASSOC]]#0 to %[[M]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<i32>, !fir.ref<i32>
-! CHECK: hlfir.end_associate %[[ASSOC]]#1, %[[ASSOC]]#2 : !fir.ref<i32>, i1
+! CHECK: cuf.data_transfer %c1{{.*}} to %[[M]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : i32, !fir.ref<i32>
! CHECK: cuf.data_transfer %[[AHOST]]#0 to %[[ADEV]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>
@@ -62,6 +61,8 @@ end
! CHECK: cuf.data_transfer %[[ASSOC]]#0 to %[[ADEV]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<!fir.array<10xi32>>, !fir.ref<!fir.array<10xi32>>
! CHECK: hlfir.end_associate %[[ASSOC]]#1, %[[ASSOC]]#2 : !fir.ref<!fir.array<10xi32>>, i1
+! CHECK: cuf.data_transfer %c10{{.*}} to %[[ADEV]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : i32, !fir.ref<!fir.array<10xi32>>
+
subroutine sub2()
integer, device :: m
integer, device :: adev(10), bdev(10)
@@ -159,3 +160,22 @@ end subroutine
! CHECK-LABEL: func.func @_QPsub6
! CHECK: cuf.data_transfer
+
+subroutine sub7(a, b, c)
+ integer, device, allocatable :: a(:), c(:)
+ integer, allocatable :: b(:)
+ b = a
+
+ a = b
+
+ c = a
+end subroutine
+
+! CHECK-LABEL: func.func @_QPsub7(
+! CHECK-SAME: %[[ARG0:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {cuf.data_attr = #cuf.cuda<device>, fir.bindc_name = "a"}, %[[ARG1:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {fir.bindc_name = "b"}, %[[ARG2:.*]]: !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>> {cuf.data_attr = #cuf.cuda<device>, fir.bindc_name = "c"}) {
+! CHECK: %[[A:.*]]:2 = hlfir.declare %[[ARG0]] dummy_scope %{{.*}} {data_attr = #cuf.cuda<device>, fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFsub7Ea"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
+! CHECK: %[[B:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{.*}} {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFsub7Eb"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
+! CHECK: %[[C:.*]]:2 = hlfir.declare %[[ARG2]] dummy_scope %0 {data_attr = #cuf.cuda<device>, fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFsub7Ec"} : (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.dscope) -> (!fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>)
+! CHECK: cuf.data_transfer %[[A]]#0 to %[[B]]#0 {transfer_kind = #cuf.cuda_transfer<device_host>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+! CHECK: cuf.data_transfer %[[B]]#0 to %[[A]]#0 {transfer_kind = #cuf.cuda_transfer<host_device>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
+! CHECK: cuf.data_transfer %[[A]]#0 to %[[C]]#0 {transfer_kind = #cuf.cuda_transfer<device_device>} : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>, !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
diff --git a/flang/test/Lower/OpenMP/Todo/masked-directive.f90 b/flang/test/Lower/OpenMP/Todo/masked-directive.f90
new file mode 100644
index 000000000000..77767715af52
--- /dev/null
+++ b/flang/test/Lower/OpenMP/Todo/masked-directive.f90
@@ -0,0 +1,13 @@
+! This test checks lowering of the OpenMP MASKED directive.
+
+! RUN: %not_todo_cmd bbc -emit-fir -fopenmp -o - %s 2>&1 | FileCheck %s
+! RUN: %not_todo_cmd %flang_fc1 -emit-fir -fopenmp -o - %s 2>&1 | FileCheck %s
+
+! CHECK: not yet implemented: Unhandled directive masked
+subroutine test_masked()
+ integer :: c = 1
+ !$omp masked
+ c = c + 1
+ !$omp end masked
+end subroutine
+
diff --git a/flang/test/Lower/OpenMP/Todo/omp-default-clause-inner-loop.f90 b/flang/test/Lower/OpenMP/Todo/omp-default-clause-inner-loop.f90
index 773452206993..c245137f16c7 100644
--- a/flang/test/Lower/OpenMP/Todo/omp-default-clause-inner-loop.f90
+++ b/flang/test/Lower/OpenMP/Todo/omp-default-clause-inner-loop.f90
@@ -8,7 +8,7 @@
! CHECK: omp.parallel {
! EXPECTED: %[[PRIVATE_Y:.*]] = fir.alloca i32 {bindc_name = "y", pinned, uniq_name = "_QFEy"}
! EXPECTED: %[[PRIVATE_Z:.*]] = fir.alloca i32 {bindc_name = "z", pinned, uniq_name = "_QFEz"}
-! CHECK: %[[TEMP:.*]] = fir.alloca i32 {bindc_name = "x", pinned, {{.*}}}
+! CHECK: %[[TEMP:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[const_1:.*]] = arith.constant 1 : i32
! CHECK: %[[const_2:.*]] = arith.constant 10 : i32
! CHECK: %[[const_3:.*]] = arith.constant 1 : i32
diff --git a/flang/test/Lower/OpenMP/copyin.f90 b/flang/test/Lower/OpenMP/copyin.f90
index 34c83fca4641..dda563303148 100644
--- a/flang/test/Lower/OpenMP/copyin.f90
+++ b/flang/test/Lower/OpenMP/copyin.f90
@@ -146,17 +146,13 @@ end
! CHECK: %[[VAL_4:.*]] = omp.threadprivate %[[VAL_3]]#1 : !fir.ref<i32> -> !fir.ref<i32>
! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFcombined_parallel_worksharing_loopEx6"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: omp.parallel {
-
+! CHECK: %[[VAL_6:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFcombined_parallel_worksharing_loopEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_8:.*]] = omp.threadprivate %[[VAL_3]]#1 : !fir.ref<i32> -> !fir.ref<i32>
! CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFcombined_parallel_worksharing_loopEx6"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_10:.*]] = fir.load %[[VAL_5]]#0 : !fir.ref<i32>
! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_9]]#0 temporary_lhs : i32, !fir.ref<i32>
-
! CHECK: omp.barrier
-
-! CHECK: %[[VAL_6:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFcombined_parallel_worksharing_loopEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! CHECK: %[[VAL_11:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_12:.*]] = fir.load %[[VAL_9]]#0 : !fir.ref<i32>
! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
@@ -284,6 +280,7 @@ subroutine common_1()
!$omp end parallel sections
end subroutine
+
! CHECK-LABEL: func.func @_QPcommon_2() {
! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFcommon_2Ei"}
! CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFcommon_2Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
@@ -305,7 +302,8 @@ end subroutine
! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (!fir.ref<i8>) -> !fir.ref<i32>
! CHECK: %[[VAL_18:.*]]:2 = hlfir.declare %[[VAL_17]] {uniq_name = "_QFcommon_2Ey"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: omp.parallel {
-
+! CHECK: %[[VAL_19:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFcommon_2Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_21:.*]] = omp.threadprivate %[[VAL_2]] : !fir.ref<!fir.array<8xi8>> -> !fir.ref<!fir.array<8xi8>>
! CHECK: %[[VAL_22:.*]] = fir.convert %[[VAL_21]] : (!fir.ref<!fir.array<8xi8>>) -> !fir.ref<!fir.array<?xi8>>
! CHECK: %[[VAL_23:.*]] = arith.constant 0 : index
@@ -322,10 +320,6 @@ end subroutine
! CHECK: %[[VAL_33:.*]] = fir.load %[[VAL_18]]#0 : !fir.ref<i32>
! CHECK: hlfir.assign %[[VAL_33]] to %[[VAL_31]]#0 temporary_lhs : i32, !fir.ref<i32>
! CHECK: omp.barrier
-
-! CHECK: %[[VAL_19:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-! CHECK: %[[VAL_20:.*]]:2 = hlfir.declare %[[VAL_19]] {uniq_name = "_QFcommon_2Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! CHECK: %[[VAL_34:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_35:.*]] = fir.load %[[VAL_26]]#0 : !fir.ref<i32>
! CHECK: %[[VAL_36:.*]] = arith.constant 1 : i32
diff --git a/flang/test/Lower/OpenMP/critical.f90 b/flang/test/Lower/OpenMP/critical.f90
index d62c58b3081a..98a90c959484 100644
--- a/flang/test/Lower/OpenMP/critical.f90
+++ b/flang/test/Lower/OpenMP/critical.f90
@@ -28,26 +28,3 @@ subroutine omp_critical()
!CHECK: omp.terminator
!$OMP END CRITICAL
end subroutine omp_critical
-
-
-! Tests that privatization for pre-determined variables (here `i`) is properly
-! handled.
-subroutine predetermined_privatization()
- integer :: a(10), i
-
- !CHECK: omp.parallel
- !$omp parallel do
-
- !CHECK: %[[PRIV_I_ALLOC:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
- !CHECK: %[[PRIV_I_DECL:.*]]:2 = hlfir.declare %[[PRIV_I_ALLOC]]
- do i = 2, 10
- !CHECK: omp.wsloop
- !CHECK: omp.loop_nest (%[[IV:[^[:space:]]+]])
- !CHECK: fir.store %[[IV]] to %[[PRIV_I_DECL]]#1
- !CHECK: omp.critical
- !$omp critical
- a(i) = a(i-1) + 1
- !$omp end critical
- end do
- !$omp end parallel do
-end
diff --git a/flang/test/Lower/OpenMP/default-clause.f90 b/flang/test/Lower/OpenMP/default-clause.f90
index 60a9c5efbb2a..843ee6bb7910 100644
--- a/flang/test/Lower/OpenMP/default-clause.f90
+++ b/flang/test/Lower/OpenMP/default-clause.f90
@@ -354,13 +354,10 @@ end subroutine
!CHECK-LABEL: func @_QPnested_default_clause_test5
!CHECK: omp.parallel {
-
+!CHECK: %[[LOOP_VAR_ALLOCA:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+!CHECK: %[[LOOP_VAR_DECLARE:.*]]:2 = hlfir.declare %[[LOOP_VAR_ALLOCA]] {{.*}}
!CHECK: %[[X_ALLOCA:.*]] = fir.alloca i32 {bindc_name = "x", pinned, uniq_name = "_QFnested_default_clause_test5Ex"}
!CHECK: %[[X_DECLARE:.*]]:2 = hlfir.declare %[[X_ALLOCA]] {{.*}}
-
-!CHECK: %[[LOOP_VAR_ALLOCA:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-!CHECK: %[[LOOP_VAR_DECLARE:.*]]:2 = hlfir.declare %[[LOOP_VAR_ALLOCA]] {{.*}}
-
!CHECK: %[[CONST_LB:.*]] = arith.constant 1 : i32
!CHECK: %[[CONST_UB:.*]] = arith.constant 50 : i32
!CHECK: %[[CONST_STEP:.*]] = arith.constant 1 : i32
@@ -386,18 +383,14 @@ end subroutine
!CHECK-LABEL: func @_QPnested_default_clause_test6
!CHECK: omp.parallel {
+!CHECK: %[[LOOP_VAR:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+!CHECK: %[[LOOP_VAR_DECLARE:.*]]:2 = hlfir.declare %[[LOOP_VAR]] {{.*}}
!CHECK: %[[X_VAR:.*]] = fir.alloca i32 {bindc_name = "x", pinned, uniq_name = "_QFnested_default_clause_test6Ex"}
!CHECK: %[[X_VAR_DECLARE:.*]]:2 = hlfir.declare %[[X_VAR]] {{.*}}
-
!CHECK: %[[Y_VAR:.*]] = fir.alloca i32 {bindc_name = "y", pinned, uniq_name = "_QFnested_default_clause_test6Ey"}
!CHECK: %[[Y_VAR_DECLARE:.*]]:2 = hlfir.declare %[[Y_VAR]] {{.*}}
-
!CHECK: %[[Z_VAR:.*]] = fir.alloca i32 {bindc_name = "z", pinned, uniq_name = "_QFnested_default_clause_test6Ez"}
!CHECK: %[[Z_VAR_DECLARE:.*]]:2 = hlfir.declare %[[Z_VAR]] {{.*}}
-
-!CHECK: %[[LOOP_VAR:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-!CHECK: %[[LOOP_VAR_DECLARE:.*]]:2 = hlfir.declare %[[LOOP_VAR]] {{.*}}
-
!CHECK: %[[CONST_LB:.*]] = arith.constant 1 : i32
!CHECK: %[[CONST_UB:.*]] = arith.constant 10 : i32
!CHECK: %[[CONST_STEP:.*]] = arith.constant 1 : i32
@@ -542,20 +535,16 @@ subroutine nested_constructs
integer :: y, z
!CHECK: omp.parallel {
-
+!CHECK: %[[INNER_J:.*]] = fir.alloca i32 {bindc_name = "j", pinned}
+!CHECK: %[[INNER_J_DECL:.*]]:2 = hlfir.declare %[[INNER_J]] {{.*}}
+!CHECK: %[[INNER_I:.*]] = fir.alloca i32 {bindc_name = "i", pinned}
+!CHECK: %[[INNER_I_DECL:.*]]:2 = hlfir.declare %[[INNER_I]] {{.*}}
!CHECK: %[[INNER_Y:.*]] = fir.alloca i32 {bindc_name = "y", pinned, uniq_name = "_QFnested_constructsEy"}
!CHECK: %[[INNER_Y_DECL:.*]]:2 = hlfir.declare %[[INNER_Y]] {{.*}}
!CHECK: %[[TEMP:.*]] = fir.load %[[Y_DECL]]#0 : !fir.ref<i32>
!CHECK: hlfir.assign %[[TEMP]] to %[[INNER_Y_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
-
!CHECK: %[[INNER_Z:.*]] = fir.alloca i32 {bindc_name = "z", pinned, uniq_name = "_QFnested_constructsEz"}
!CHECK: %[[INNER_Z_DECL:.*]]:2 = hlfir.declare %[[INNER_Z]] {{.*}}
-
-!CHECK: %[[INNER_I:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-!CHECK: %[[INNER_I_DECL:.*]]:2 = hlfir.declare %[[INNER_I]] {{.*}}
-
-!CHECK: %[[INNER_J:.*]] = fir.alloca i32 {bindc_name = "j", pinned, {{.*}}}
-!CHECK: %[[INNER_J_DECL:.*]]:2 = hlfir.declare %[[INNER_J]] {{.*}}
!$omp parallel default(private) firstprivate(y)
!CHECK: {{.*}} = fir.do_loop {{.*}} {
do i = 1, 10
diff --git a/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90 b/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90
index 271b97819e60..68d20213a14b 100644
--- a/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90
+++ b/flang/test/Lower/OpenMP/hlfir-seqloop-parallel.f90
@@ -18,7 +18,7 @@ end subroutine
!CHECK: %[[I_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFsb1Ei"}
!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_ADDR]] {uniq_name = "_QFsb1Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.parallel {
-!CHECK: %[[I_PVT_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+!CHECK: %[[I_PVT_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", pinned}
!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_ADDR]] {uniq_name = "_QFsb1Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[I_FINAL_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (index, i32) {
!CHECK: fir.store %[[I_VAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
@@ -51,15 +51,12 @@ end subroutine
!CHECK: %[[K_ADDR:.*]] = fir.alloca i32 {bindc_name = "k", uniq_name = "_QFsb2Ek"}
!CHECK: %[[K_DECL:.*]]:2 = hlfir.declare %[[K_ADDR]] {uniq_name = "_QFsb2Ek"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.parallel {
-
-!CHECK: %[[J_PVT_ADDR:.*]] = fir.alloca i32 {bindc_name = "j", pinned, {{.*}}}
-!CHECK: %[[J_PVT_DECL:.*]]:2 = hlfir.declare %[[J_PVT_ADDR]] {uniq_name = "_QFsb2Ej"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
-!CHECK: %[[I_PVT_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+!CHECK: %[[I_PVT_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", pinned}
!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_ADDR]] {uniq_name = "_QFsb2Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
+!CHECK: %[[J_PVT_ADDR:.*]] = fir.alloca i32 {bindc_name = "j", pinned}
+!CHECK: %[[J_PVT_DECL:.*]]:2 = hlfir.declare %[[J_PVT_ADDR]] {uniq_name = "_QFsb2Ej"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[FINAL_J_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[J_VAL:.*]] = %{{.*}}) -> (index, i32) {
-!CHECK: fir.store %arg1 to %[[J_PVT_DECL]]#1 : !fir.ref<i32>
+!CHECK: fir.store %arg1 to %9#1 : !fir.ref<i32>
!CHECK: fir.if %{{.*}} {
!CHECK: %[[FINAL_I_VAL:.*]]:2 = fir.do_loop %{{.*}} = %{{.*}} to %{{.*}} step %{{.*}} iter_args(%[[I_VAL:.*]] = %{{.*}}) -> (index, i32) {
!CHECK: fir.store %[[I_VAL]] to %[[I_PVT_DECL]]#1 : !fir.ref<i32>
diff --git a/flang/test/Lower/OpenMP/hlfir-wsloop.f90 b/flang/test/Lower/OpenMP/hlfir-wsloop.f90
index fc5b5999f270..fea05ae3d6bc 100644
--- a/flang/test/Lower/OpenMP/hlfir-wsloop.f90
+++ b/flang/test/Lower/OpenMP/hlfir-wsloop.f90
@@ -10,7 +10,7 @@ subroutine simple_loop
! CHECK-DAG: %[[WS_END:.*]] = arith.constant 9 : i32
! CHECK: omp.parallel
!$OMP PARALLEL
- ! CHECK-DAG: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
+ ! CHECK-DAG: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned}
! CHECK: %[[IV:.*]] = fir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loopEi"} : (!fir.ref<i32>) -> !fir.ref<i32>
! CHECK: omp.wsloop {
! CHECK-NEXT: omp.loop_nest (%[[I:.*]]) : i32 = (%[[WS_ST]]) to (%[[WS_END]]) inclusive step (%[[WS_ST]]) {
diff --git a/flang/test/Lower/OpenMP/lastprivate-iv.f90 b/flang/test/Lower/OpenMP/lastprivate-iv.f90
index 61c588732882..718c9c99370e 100644
--- a/flang/test/Lower/OpenMP/lastprivate-iv.f90
+++ b/flang/test/Lower/OpenMP/lastprivate-iv.f90
@@ -2,13 +2,10 @@
! RUN: %flang_fc1 -emit-hlfir -fopenmp -o - %s 2>&1 | FileCheck %s
!CHECK-LABEL: func @_QPlastprivate_iv_inc
-
+!CHECK: %[[I_MEM:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+!CHECK: %[[I:.*]]:2 = hlfir.declare %[[I_MEM]] {uniq_name = "_QFlastprivate_iv_incEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[I2_MEM:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFlastprivate_iv_incEi"}
!CHECK: %[[I2:.*]]:2 = hlfir.declare %[[I2_MEM]] {uniq_name = "_QFlastprivate_iv_incEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
-!CHECK: %[[I_MEM:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-!CHECK: %[[I:.*]]:2 = hlfir.declare %[[I_MEM]] {uniq_name = "_QFlastprivate_iv_incEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
!CHECK: %[[LB:.*]] = arith.constant 4 : i32
!CHECK: %[[UB:.*]] = arith.constant 10 : i32
!CHECK: %[[STEP:.*]] = arith.constant 3 : i32
@@ -40,13 +37,10 @@ subroutine lastprivate_iv_inc()
end subroutine
!CHECK-LABEL: func @_QPlastprivate_iv_dec
-
+!CHECK: %[[I_MEM:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+!CHECK: %[[I:.*]]:2 = hlfir.declare %[[I_MEM]] {uniq_name = "_QFlastprivate_iv_decEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[I2_MEM:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFlastprivate_iv_decEi"}
!CHECK: %[[I2:.*]]:2 = hlfir.declare %[[I2_MEM]] {uniq_name = "_QFlastprivate_iv_decEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
-!CHECK: %[[I_MEM:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-!CHECK: %[[I:.*]]:2 = hlfir.declare %[[I_MEM]] {uniq_name = "_QFlastprivate_iv_decEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
!CHECK: %[[LB:.*]] = arith.constant 10 : i32
!CHECK: %[[UB:.*]] = arith.constant 1 : i32
!CHECK: %[[STEP:.*]] = arith.constant -3 : i32
@@ -76,3 +70,22 @@ subroutine lastprivate_iv_dec()
end do
!$omp end do
end subroutine
+
+
+!CHECK-LABEL: @_QPlastprivate_iv_i1
+subroutine lastprivate_iv_i1
+ integer*1 :: i1
+ i1=0
+!CHECK: omp.wsloop
+!CHECK: omp.loop_nest
+!CHECK: fir.if %{{.*}} {
+!CHECK: %[[I8_VAL:.*]] = fir.convert %{{.*}} : (i32) -> i8
+!CHECK: fir.store %[[I8_VAL]] to %[[IV:.*]]#1 : !fir.ref<i8>
+!CHECK: %[[IV_VAL:.*]] = fir.load %[[IV]]#0 : !fir.ref<i8>
+!CHECK: hlfir.assign %[[IV_VAL]] to %{{.*}}#0 temporary_lhs : i8, !fir.ref<i8>
+!CHECK: }
+ !$omp do lastprivate(i1)
+ do i1=1,8
+ enddo
+!$omp end do
+end subroutine
diff --git a/flang/test/Lower/OpenMP/parallel-lastprivate-clause-scalar.f90 b/flang/test/Lower/OpenMP/parallel-lastprivate-clause-scalar.f90
index be0cc4195c28..e6ee75c8a5be 100644
--- a/flang/test/Lower/OpenMP/parallel-lastprivate-clause-scalar.f90
+++ b/flang/test/Lower/OpenMP/parallel-lastprivate-clause-scalar.f90
@@ -10,7 +10,7 @@
!CHECK-DAG: %[[ARG1_DECL:.*]]:2 = hlfir.declare %[[ARG1_REF]] typeparams %[[FIVE]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFlastprivate_characterEarg1"} : (!fir.ref<!fir.char<1,5>>, index, !fir.dscope) -> (!fir.ref<!fir.char<1,5>>, !fir.ref<!fir.char<1,5>>)
!CHECK: omp.parallel {
-!CHECK-DAG: %[[ARG1_PVT:.*]] = fir.alloca !fir.char<1,5> {bindc_name = "arg1", pinned, {{.*}}}
+!CHECK-DAG: %[[ARG1_PVT:.*]] = fir.alloca !fir.char<1,5> {bindc_name = "arg1",
!CHECK-DAG: %[[ARG1_PVT_DECL:.*]]:2 = hlfir.declare %[[ARG1_PVT]] typeparams %[[FIVE]] {uniq_name = "_QFlastprivate_characterEarg1"} : (!fir.ref<!fir.char<1,5>>, index) -> (!fir.ref<!fir.char<1,5>>, !fir.ref<!fir.char<1,5>>)
! Check that we are accessing the clone inside the loop
@@ -59,7 +59,7 @@ end subroutine
!CHECK: func @_QPlastprivate_int(%[[ARG1:.*]]: !fir.ref<i32> {fir.bindc_name = "arg1"}) {
!CHECK: %[[ARG1_DECL:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFlastprivate_intEarg1"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK-DAG: omp.parallel {
-!CHECK-DAG: %[[CLONE:.*]] = fir.alloca i32 {bindc_name = "arg1", pinned, {{.*}}}
+!CHECK-DAG: %[[CLONE:.*]] = fir.alloca i32 {bindc_name = "arg1"
!CHECK-DAG: %[[CLONE_DECL:.*]]:2 = hlfir.declare %[[CLONE]] {uniq_name = "_QFlastprivate_intEarg1"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.wsloop {
!CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} {
@@ -100,9 +100,9 @@ end subroutine
!CHECK: %[[ARG1_DECL:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFmult_lastprivate_intEarg1"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[ARG2_DECL:.*]]:2 = hlfir.declare %[[ARG2]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFmult_lastprivate_intEarg2"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.parallel {
-!CHECK-DAG: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1", pinned, {{.*}}}
+!CHECK-DAG: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1"
!CHECK-DAG: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFmult_lastprivate_intEarg1"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK-DAG: %[[CLONE2:.*]] = fir.alloca i32 {bindc_name = "arg2", pinned, {{.*}}}
+!CHECK-DAG: %[[CLONE2:.*]] = fir.alloca i32 {bindc_name = "arg2"
!CHECK-DAG: %[[CLONE2_DECL:.*]]:2 = hlfir.declare %[[CLONE2]] {uniq_name = "_QFmult_lastprivate_intEarg2"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.wsloop {
!CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} {
@@ -145,9 +145,9 @@ end subroutine
!CHECK: %[[ARG1_DECL:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFmult_lastprivate_int2Earg1"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[ARG2_DECL:.*]]:2 = hlfir.declare %[[ARG2]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFmult_lastprivate_int2Earg2"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.parallel {
-!CHECK-DAG: %[[CLONE2:.*]] = fir.alloca i32 {bindc_name = "arg2", pinned, {{.*}}}
+!CHECK-DAG: %[[CLONE2:.*]] = fir.alloca i32 {bindc_name = "arg2"
!CHECK-DAG: %[[CLONE2_DECL:.*]]:2 = hlfir.declare %[[CLONE2]] {uniq_name = "_QFmult_lastprivate_int2Earg2"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-!CHECK-DAG: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1", pinned, {{.*}}}
+!CHECK-DAG: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1"
!CHECK-DAG: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFmult_lastprivate_int2Earg1"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.wsloop {
!CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} {
@@ -191,12 +191,12 @@ end subroutine
!CHECK: %[[ARG2_DECL:.*]]:2 = hlfir.declare %[[ARG2]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFfirstpriv_lastpriv_intEarg2"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.parallel {
! Firstprivate update
-!CHECK: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1", pinned, {{.*}}}
+!CHECK: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1"
!CHECK: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFfirstpriv_lastpriv_intEarg1"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[FPV_LD:.*]] = fir.load %[[ARG1_DECL]]#0 : !fir.ref<i32>
!CHECK: hlfir.assign %[[FPV_LD]] to %[[CLONE1_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
! Lastprivate Allocation
-!CHECK: %[[CLONE2:.*]] = fir.alloca i32 {bindc_name = "arg2", pinned, {{.*}}}
+!CHECK: %[[CLONE2:.*]] = fir.alloca i32 {bindc_name = "arg2"
!CHECK: %[[CLONE2_DECL:.*]]:2 = hlfir.declare %[[CLONE2]] {uniq_name = "_QFfirstpriv_lastpriv_intEarg2"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK-NOT: omp.barrier
!CHECK: omp.wsloop {
@@ -237,16 +237,11 @@ end subroutine
!CHECK: func.func @_QPfirstpriv_lastpriv_int2(%[[ARG1:.*]]: !fir.ref<i32> {fir.bindc_name = "arg1"}) {
!CHECK: %[[ARG1_DECL:.*]]:2 = hlfir.declare %[[ARG1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFfirstpriv_lastpriv_int2Earg1"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.parallel {
-
! Firstprivate update
-!CHECK: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1", pinned, {{.*}}}
+!CHECK: %[[CLONE1:.*]] = fir.alloca i32 {bindc_name = "arg1"
!CHECK: %[[CLONE1_DECL:.*]]:2 = hlfir.declare %[[CLONE1]] {uniq_name = "_QFfirstpriv_lastpriv_int2Earg1"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK-NEXT: %[[FPV_LD:.*]] = fir.load %[[ARG1_DECL]]#0 : !fir.ref<i32>
!CHECK-NEXT: hlfir.assign %[[FPV_LD]] to %[[CLONE1_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
-
-!CHECK-NEXT: %[[IV:.*]] = fir.alloca i32 {bindc_name = "n", pinned, {{.*}}}
-!CHECK-NEXT: hlfir.declare %[[IV]]
-
!CHECK-NEXT: omp.barrier
!CHECK: omp.wsloop {
!CHECK-NEXT: omp.loop_nest (%[[INDX_WS:.*]]) : {{.*}} {
diff --git a/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90 b/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90
index 19c7b78298ea..d3843c8e241a 100644
--- a/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90
+++ b/flang/test/Lower/OpenMP/parallel-private-clause-fixes.f90
@@ -12,11 +12,11 @@
! CHECK-DAG: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFmultiple_private_fixEx"}
! CHECK-DAG: %[[X_DECL:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFmultiple_private_fixEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: omp.parallel {
-! CHECK-DAG: %[[PRIV_I:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK-DAG: %[[PRIV_I:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK-DAG: %[[PRIV_I_DECL:.*]]:2 = hlfir.declare %[[PRIV_I]] {uniq_name = "_QFmultiple_private_fixEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK-DAG: %[[PRIV_J:.*]] = fir.alloca i32 {bindc_name = "j", pinned, uniq_name = "_QFmultiple_private_fixEj"}
! CHECK-DAG: %[[PRIV_J_DECL:.*]]:2 = hlfir.declare %[[PRIV_J]] {uniq_name = "_QFmultiple_private_fixEj"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-! CHECK-DAG: %[[PRIV_X:.*]] = fir.alloca i32 {bindc_name = "x", pinned, {{.*}}}
+! CHECK-DAG: %[[PRIV_X:.*]] = fir.alloca i32 {bindc_name = "x", pinned
! CHECK-DAG: %[[PRIV_X_DECL:.*]]:2 = hlfir.declare %[[PRIV_X]] {uniq_name = "_QFmultiple_private_fixEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[ONE:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_3:.*]] = fir.load %[[GAMA_DECL]]#0 : !fir.ref<i32>
diff --git a/flang/test/Lower/OpenMP/parallel-private-clause.f90 b/flang/test/Lower/OpenMP/parallel-private-clause.f90
index 7f5bc2565e67..b9b58a135aaa 100644
--- a/flang/test/Lower/OpenMP/parallel-private-clause.f90
+++ b/flang/test/Lower/OpenMP/parallel-private-clause.f90
@@ -292,14 +292,14 @@ subroutine simple_loop_1
real, allocatable :: r;
! FIRDialect: omp.parallel
!$OMP PARALLEL PRIVATE(r)
+ ! FIRDialect: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned}
+
+ ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loop_1Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! FIRDialect: [[R:%.*]] = fir.alloca !fir.box<!fir.heap<f32>> {bindc_name = "r", pinned, uniq_name = "{{.*}}Er"}
! FIRDialect: fir.store {{%.*}} to [[R]] : !fir.ref<!fir.box<!fir.heap<f32>>>
! FIRDialect: fir.store {{%.*}} to [[R]] : !fir.ref<!fir.box<!fir.heap<f32>>>
! FIRDialect: %[[R_DECL:.*]]:2 = hlfir.declare [[R]] {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "_QFsimple_loop_1Er"} : (!fir.ref<!fir.box<!fir.heap<f32>>>) -> (!fir.ref<!fir.box<!fir.heap<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>)
- ! FIRDialect: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
- ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loop_1Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! FIRDialect: %[[WS_LB:.*]] = arith.constant 1 : i32
! FIRDialect: %[[WS_UB:.*]] = arith.constant 9 : i32
! FIRDialect: %[[WS_STEP:.*]] = arith.constant 1 : i32
@@ -332,14 +332,14 @@ subroutine simple_loop_2
real, allocatable :: r;
! FIRDialect: omp.parallel
!$OMP PARALLEL
+ ! FIRDialect: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned}
+
+ ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "{{.*}}Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! FIRDialect: [[R:%.*]] = fir.alloca !fir.box<!fir.heap<f32>> {bindc_name = "r", pinned, uniq_name = "{{.*}}Er"}
! FIRDialect: fir.store {{%.*}} to [[R]] : !fir.ref<!fir.box<!fir.heap<f32>>>
! FIRDialect: fir.store {{%.*}} to [[R]] : !fir.ref<!fir.box<!fir.heap<f32>>>
! FIRDialect: %[[R_DECL:.*]]:2 = hlfir.declare [[R]] {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "{{.*}}Er"} : (!fir.ref<!fir.box<!fir.heap<f32>>>) -> (!fir.ref<!fir.box<!fir.heap<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>)
- ! FIRDialect: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
- ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "{{.*}}Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! FIRDialect: %[[WS_LB:.*]] = arith.constant 1 : i32
! FIRDialect: %[[WS_UB:.*]] = arith.constant 9 : i32
! FIRDialect: %[[WS_STEP:.*]] = arith.constant 1 : i32
@@ -371,15 +371,14 @@ subroutine simple_loop_3
integer :: i
real, allocatable :: r;
! FIRDialect: omp.parallel
+ ! FIRDialect: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned}
+ ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "{{.*}}Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! FIRDialect: [[R:%.*]] = fir.alloca !fir.box<!fir.heap<f32>> {bindc_name = "r", pinned, uniq_name = "{{.*}}Er"}
! FIRDialect: fir.store {{%.*}} to [[R]] : !fir.ref<!fir.box<!fir.heap<f32>>>
! FIRDialect: fir.store {{%.*}} to [[R]] : !fir.ref<!fir.box<!fir.heap<f32>>>
! FIRDialect: [[R_DECL:%.*]]:2 = hlfir.declare [[R]] {fortran_attrs = #fir.var_attrs<allocatable>, uniq_name = "{{.*}}Er"} : (!fir.ref<!fir.box<!fir.heap<f32>>>) -> (!fir.ref<!fir.box<!fir.heap<f32>>>, !fir.ref<!fir.box<!fir.heap<f32>>>)
- ! FIRDialect: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
- ! FIRDialect: %[[ALLOCA_IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "{{.*}}Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! FIRDialect: %[[WS_LB:.*]] = arith.constant 1 : i32
! FIRDialect: %[[WS_UB:.*]] = arith.constant 9 : i32
! FIRDialect: %[[WS_STEP:.*]] = arith.constant 1 : i32
diff --git a/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90 b/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90
index fdb7e974f1c5..f6d3b0b73f73 100644
--- a/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90
+++ b/flang/test/Lower/OpenMP/parallel-reduction-allocatable-array.f90
@@ -90,7 +90,7 @@ end program
! CHECK: %[[VAL_11:.*]] = fir.embox %[[VAL_9]](%[[VAL_10]]) : (!fir.heap<!fir.array<?xi32>>, !fir.shape<1>) -> !fir.box<!fir.heap<!fir.array<?xi32>>>
! CHECK: fir.store %[[VAL_11]] to %[[VAL_3]]#1 : !fir.ref<!fir.box<!fir.heap<!fir.array<?xi32>>>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_12:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_12:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_13:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_14:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_15:.*]] = arith.constant 10 : i32
diff --git a/flang/test/Lower/OpenMP/parallel-reduction3.f90 b/flang/test/Lower/OpenMP/parallel-reduction3.f90
index 879f59dfad6b..47b743a558b4 100644
--- a/flang/test/Lower/OpenMP/parallel-reduction3.f90
+++ b/flang/test/Lower/OpenMP/parallel-reduction3.f90
@@ -67,7 +67,7 @@
! CHECK: %[[VAL_13:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_12]]#0 : i32, !fir.box<!fir.array<?xi32>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_17:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/parallel-wsloop-firstpriv.f90 b/flang/test/Lower/OpenMP/parallel-wsloop-firstpriv.f90
index 33dab125b3b2..c32eb2400a34 100644
--- a/flang/test/Lower/OpenMP/parallel-wsloop-firstpriv.f90
+++ b/flang/test/Lower/OpenMP/parallel-wsloop-firstpriv.f90
@@ -11,15 +11,12 @@ subroutine omp_do_firstprivate(a)
n = a+1
!$omp parallel do firstprivate(a)
! CHECK: omp.parallel {
-
+ ! CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+ ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_firstprivateEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[A_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "a", pinned, uniq_name = "_QFomp_do_firstprivateEa"}
! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_firstprivateEa"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK-NEXT: %[[LD:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref<i32>
! CHECK-NEXT: hlfir.assign %[[LD]] to %[[A_PVT_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
-
- ! CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
- ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_firstprivateEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! CHECK: %[[LB:.*]] = arith.constant 1 : i32
! CHECK-NEXT: %[[UB:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref<i32>
! CHECK-NEXT: %[[STEP:.*]] = arith.constant 1 : i32
@@ -48,19 +45,17 @@ subroutine omp_do_firstprivate2(a, n)
n = a+1
!$omp parallel do firstprivate(a, n)
! CHECK: omp.parallel {
-
- ! CHECK: %[[A_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "a", pinned, {{.*}}}
+ ! CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+ ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_firstprivate2Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+ ! CHECK: %[[A_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "a", pinned
! CHECK: %[[A_PVT_DECL:.*]]:2 = hlfir.declare %[[A_PVT_REF]] {uniq_name = "_QFomp_do_firstprivate2Ea"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[LD:.*]] = fir.load %[[ARG0_DECL]]#0 : !fir.ref<i32>
! CHECK: hlfir.assign %[[LD]] to %[[A_PVT_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
-
! CHECK: %[[N_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "n", pinned, uniq_name = "_QFomp_do_firstprivate2En"}
! CHECK: %[[N_PVT_DECL:.*]]:2 = hlfir.declare %[[N_PVT_REF]] {uniq_name = "_QFomp_do_firstprivate2En"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[LD1:.*]] = fir.load %[[ARG1_DECL]]#0 : !fir.ref<i32>
! CHECK: hlfir.assign %[[LD1]] to %[[N_PVT_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
- ! CHECK: %[[I_PVT_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
- ! CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_REF]] {uniq_name = "_QFomp_do_firstprivate2Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[LB:.*]] = fir.load %[[A_PVT_DECL]]#0 : !fir.ref<i32>
! CHECK: %[[UB:.*]] = fir.load %[[N_PVT_DECL]]#0 : !fir.ref<i32>
diff --git a/flang/test/Lower/OpenMP/parallel-wsloop.f90 b/flang/test/Lower/OpenMP/parallel-wsloop.f90
index e5c303d7bb2e..5fa42da2269f 100644
--- a/flang/test/Lower/OpenMP/parallel-wsloop.f90
+++ b/flang/test/Lower/OpenMP/parallel-wsloop.f90
@@ -147,18 +147,14 @@ end subroutine parallel_private_do
! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32> {fir.bindc_name = "nt"}) {
! CHECK: %[[NT_DECL:.*]]:2 = hlfir.declare %[[VAL_1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFparallel_private_doEnt"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: omp.parallel {
-
+! CHECK: %[[I_PRIV:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV]] {uniq_name = "_QFparallel_private_doEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[COND_ADDR:.*]] = fir.alloca !fir.logical<4> {bindc_name = "cond", pinned, uniq_name = "_QFparallel_private_doEcond"}
! CHECK: %[[COND_DECL:.*]]:2 = hlfir.declare %[[COND_ADDR]] {uniq_name = "_QFparallel_private_doEcond"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-
! CHECK: %[[NT_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "nt", pinned, uniq_name = "_QFparallel_private_doEnt"}
! CHECK: %[[NT_PRIV_DECL:.*]]:2 = hlfir.declare %[[NT_PRIV_ADDR]] {uniq_name = "_QFparallel_private_doEnt"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[NT:.*]] = fir.load %[[NT_DECL]]#0 : !fir.ref<i32>
! CHECK: hlfir.assign %[[NT]] to %[[NT_PRIV_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
-
-! CHECK: %[[I_PRIV:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV]] {uniq_name = "_QFparallel_private_doEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 9 : i32
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
@@ -198,20 +194,16 @@ end subroutine omp_parallel_multiple_firstprivate_do
! CHECK: %[[A_DECL:.*]]:2 = hlfir.declare %[[A_ADDR]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFomp_parallel_multiple_firstprivate_doEa"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[B_DECL:.*]]:2 = hlfir.declare %[[B_ADDR]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFomp_parallel_multiple_firstprivate_doEb"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: omp.parallel {
-
+! CHECK: %[[I_PRIV_ADDR:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_multiple_firstprivate_doEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[A_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "a", pinned, uniq_name = "_QFomp_parallel_multiple_firstprivate_doEa"}
! CHECK: %[[A_PRIV_DECL:.*]]:2 = hlfir.declare %[[A_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_multiple_firstprivate_doEa"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[A:.*]] = fir.load %[[A_DECL]]#0 : !fir.ref<i32>
! CHECK: hlfir.assign %[[A]] to %[[A_PRIV_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
-
! CHECK: %[[B_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "b", pinned, uniq_name = "_QFomp_parallel_multiple_firstprivate_doEb"}
! CHECK: %[[B_PRIV_DECL:.*]]:2 = hlfir.declare %[[B_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_multiple_firstprivate_doEb"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[B:.*]] = fir.load %[[B_DECL]]#0 : !fir.ref<i32>
! CHECK: hlfir.assign %[[B]] to %[[B_PRIV_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
-
-! CHECK: %[[I_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_multiple_firstprivate_doEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_9:.*]] = arith.constant 10 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32
@@ -252,18 +244,14 @@ end subroutine parallel_do_private
! CHECK-SAME: %[[VAL_1:.*]]: !fir.ref<i32> {fir.bindc_name = "nt"}) {
! CHECK: %[[NT_DECL:.*]]:2 = hlfir.declare %[[VAL_1]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFparallel_do_privateEnt"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: omp.parallel {
-
+! CHECK: %[[I_PRIV_ADDR:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFparallel_do_privateEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[COND_PRIV_ADDR:.*]] = fir.alloca !fir.logical<4> {bindc_name = "cond", pinned, uniq_name = "_QFparallel_do_privateEcond"}
! CHECK: %[[COND_PRIV_DECL:.*]]:2 = hlfir.declare %[[COND_PRIV_ADDR]] {uniq_name = "_QFparallel_do_privateEcond"} : (!fir.ref<!fir.logical<4>>) -> (!fir.ref<!fir.logical<4>>, !fir.ref<!fir.logical<4>>)
-
! CHECK: %[[NT_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "nt", pinned, uniq_name = "_QFparallel_do_privateEnt"}
! CHECK: %[[NT_PRIV_DECL:.*]]:2 = hlfir.declare %[[NT_PRIV_ADDR]] {uniq_name = "_QFparallel_do_privateEnt"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[NT_VAL:.*]] = fir.load %[[NT_DECL]]#0 : !fir.ref<i32>
! CHECK: hlfir.assign %[[NT_VAL]] to %[[NT_PRIV_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
-
-! CHECK: %[[I_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFparallel_do_privateEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 9 : i32
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
@@ -303,20 +291,16 @@ end subroutine omp_parallel_do_multiple_firstprivate
! CHECK: %[[A_DECL:.*]]:2 = hlfir.declare %[[A_ADDR]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEa"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[B_DECL:.*]]:2 = hlfir.declare %[[B_ADDR]] dummy_scope %{{[0-9]+}} {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEb"} : (!fir.ref<i32>, !fir.dscope) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: omp.parallel {
-
+! CHECK: %[[I_PRIV_ADDR:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[A_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "a", pinned, uniq_name = "_QFomp_parallel_do_multiple_firstprivateEa"}
! CHECK: %[[A_PRIV_DECL:.*]]:2 = hlfir.declare %[[A_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEa"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[A:.*]] = fir.load %[[A_DECL]]#0 : !fir.ref<i32>
! CHECK: hlfir.assign %[[A]] to %[[A_PRIV_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
-
! CHECK: %[[B_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "b", pinned, uniq_name = "_QFomp_parallel_do_multiple_firstprivateEb"}
! CHECK: %[[B_PRIV_DECL:.*]]:2 = hlfir.declare %[[B_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEb"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[B:.*]] = fir.load %[[B_DECL]]#0 : !fir.ref<i32>
! CHECK: hlfir.assign %[[B]] to %[[B_PRIV_DECL]]#0 temporary_lhs : i32, !fir.ref<i32>
-
-! CHECK: %[[I_PRIV_ADDR:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-! CHECK: %[[I_PRIV_DECL:.*]]:2 = hlfir.declare %[[I_PRIV_ADDR]] {uniq_name = "_QFomp_parallel_do_multiple_firstprivateEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! CHECK: %[[VAL_8:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_9:.*]] = arith.constant 10 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32
diff --git a/flang/test/Lower/OpenMP/stop-stmt-in-region.f90 b/flang/test/Lower/OpenMP/stop-stmt-in-region.f90
index f8f2604caacc..4f3819c5e4eb 100644
--- a/flang/test/Lower/OpenMP/stop-stmt-in-region.f90
+++ b/flang/test/Lower/OpenMP/stop-stmt-in-region.f90
@@ -73,16 +73,12 @@ subroutine test_stop_in_region3()
end
! CHECK-LABEL: func.func @_QPtest_stop_in_region4() {
-
+! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+! CHECK: %[[VAL_0_DECL:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFtest_stop_in_region4Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_1:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFtest_stop_in_region4Ei"}
! CHECK: %[[VAL_1_DECL:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFtest_stop_in_region4Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFtest_stop_in_region4Ex"}
! CHECK: %[[VAL_2_DECL:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFtest_stop_in_region4Ex"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
-! CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-! CHECK: %[[VAL_0_DECL:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFtest_stop_in_region4Ei"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! CHECK: %[[VAL_3:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_4:.*]] = arith.constant 10 : i32
! CHECK: %[[VAL_5:.*]] = arith.constant 1 : i32
diff --git a/flang/test/Lower/OpenMP/target.f90 b/flang/test/Lower/OpenMP/target.f90
index 0fcbad7e58fe..9bb855e44694 100644
--- a/flang/test/Lower/OpenMP/target.f90
+++ b/flang/test/Lower/OpenMP/target.f90
@@ -593,7 +593,7 @@ subroutine omp_target_parallel_do
!CHECK: %[[VAL_0_DECL:.*]]:2 = hlfir.declare %[[ARG_0]](%{{.*}}) {uniq_name = "_QFomp_target_parallel_doEa"} : (!fir.ref<!fir.array<1024xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<1024xi32>>, !fir.ref<!fir.array<1024xi32>>)
!CHECK: omp.parallel
!$omp target parallel do map(tofrom: a)
- !CHECK: %[[I_PVT_ALLOCA:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+ !CHECK: %[[I_PVT_ALLOCA:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
!CHECK: %[[I_PVT_DECL:.*]]:2 = hlfir.declare %[[I_PVT_ALLOCA]] {uniq_name = "_QFomp_target_parallel_doEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.wsloop {
!CHECK-NEXT: omp.loop_nest (%[[I_VAL:.*]]) : i32
diff --git a/flang/test/Lower/OpenMP/unstructured.f90 b/flang/test/Lower/OpenMP/unstructured.f90
index b36e4f37a745..6a1331799d54 100644
--- a/flang/test/Lower/OpenMP/unstructured.f90
+++ b/flang/test/Lower/OpenMP/unstructured.f90
@@ -62,14 +62,14 @@ end
! CHECK: omp.parallel {
! CHECK: %[[ALLOCA_K:.*]] = fir.alloca i32 {bindc_name = "k", pinned}
! CHECK: %[[K_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_K]] {uniq_name = "_QFss3Ek"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
+! CHECK: %[[ALLOCA_1:.*]] = fir.alloca i32 {{{.*}}, pinned}
+! CHECK: %[[OMP_LOOP_J_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_1]] {uniq_name = "_QFss3Ej"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+! CHECK: %[[ALLOCA_2:.*]] = fir.alloca i32 {{{.*}}, pinned}
+! CHECK: %[[OMP_LOOP_K_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_2]] {uniq_name = "_QFss3Ek"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: br ^bb1
! CHECK: ^bb1: // 2 preds: ^bb0, ^bb3
! CHECK: cond_br %{{[0-9]*}}, ^bb2, ^bb4
! CHECK: ^bb2: // pred: ^bb1
-
-! CHECK: %[[ALLOCA_2:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
-! CHECK: %[[OMP_LOOP_K_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_2]] {uniq_name = "_QFss3Ek"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: omp.wsloop {
! CHECK: omp.loop_nest (%[[ARG1:.*]]) : {{.*}} {
! CHECK: fir.store %[[ARG1]] to %[[OMP_LOOP_K_DECL]]#1 : !fir.ref<i32>
@@ -80,10 +80,6 @@ end
! CHECK: }
! CHECK: omp.terminator
! CHECK: }
-
-! CHECK: %[[ALLOCA_1:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
-! CHECK: %[[OMP_LOOP_J_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_1]] {uniq_name = "_QFss3Ej"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
! CHECK: omp.wsloop {
! CHECK: omp.loop_nest (%[[ARG2:.*]]) : {{.*}} {
! CHECK: fir.store %[[ARG2]] to %[[OMP_LOOP_J_DECL]]#1 : !fir.ref<i32>
@@ -129,7 +125,7 @@ end
! CHECK-LABEL: func @_QPss4{{.*}} {
! CHECK: omp.parallel {
-! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 {{{.*}}, pinned, uniq_name = "_QFss4Ej"}
+! CHECK: %[[ALLOCA:.*]] = fir.alloca i32 {{{.*}}, pinned}
! CHECK: %[[OMP_LOOP_J_DECL:.*]]:2 = hlfir.declare %[[ALLOCA]] {uniq_name = "_QFss4Ej"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: omp.wsloop {
! CHECK-NEXT: omp.loop_nest (%[[ARG:.*]]) : {{.*}} {
diff --git a/flang/test/Lower/OpenMP/wsloop-collapse.f90 b/flang/test/Lower/OpenMP/wsloop-collapse.f90
index 67351ca275ef..d9541e176f6a 100644
--- a/flang/test/Lower/OpenMP/wsloop-collapse.f90
+++ b/flang/test/Lower/OpenMP/wsloop-collapse.f90
@@ -4,56 +4,40 @@
!CHECK-LABEL: func.func @_QQmain() attributes {fir.bindc_name = "wsloop_collapse"} {
program wsloop_collapse
+!CHECK: %[[VAL_0:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+!CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFEk"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+!CHECK: %[[VAL_2:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+!CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFEj"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
+!CHECK: %[[VAL_4:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
+!CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[VAL_6:.*]] = fir.alloca i32 {bindc_name = "a", uniq_name = "_QFEa"}
!CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFEa"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
!CHECK: %[[VAL_8:.*]] = fir.alloca i32 {bindc_name = "b", uniq_name = "_QFEb"}
!CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFEb"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
!CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "c", uniq_name = "_QFEc"}
!CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFEc"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
-
!CHECK: %[[VAL_12:.*]] = fir.alloca i32 {bindc_name = "i", uniq_name = "_QFEi"}
!CHECK: %[[VAL_13:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
!CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "j", uniq_name = "_QFEj"}
!CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFEj"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
!CHECK: %[[VAL_16:.*]] = fir.alloca i32 {bindc_name = "k", uniq_name = "_QFEk"}
!CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFEk"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
!CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "x", uniq_name = "_QFEx"}
!CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFEx"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
+ integer :: i, j, k
+ integer :: a, b, c
+ integer :: x
!CHECK: %[[VAL_20:.*]] = arith.constant 3 : i32
!CHECK: hlfir.assign %[[VAL_20]] to %[[VAL_7]]#0 : i32, !fir.ref<i32>
-
+ a=3
!CHECK: %[[VAL_21:.*]] = arith.constant 2 : i32
!CHECK: hlfir.assign %[[VAL_21]] to %[[VAL_9]]#0 : i32, !fir.ref<i32>
-
+ b=2
!CHECK: %[[VAL_22:.*]] = arith.constant 5 : i32
!CHECK: hlfir.assign %[[VAL_22]] to %[[VAL_11]]#0 : i32, !fir.ref<i32>
-
+ c=5
!CHECK: %[[VAL_23:.*]] = arith.constant 0 : i32
!CHECK: hlfir.assign %[[VAL_23]] to %[[VAL_19]]#0 : i32, !fir.ref<i32>
-
-!CHECK: %[[VAL_4:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
-!CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
-!CHECK: %[[VAL_2:.*]] = fir.alloca i32 {bindc_name = "j", pinned, {{.*}}}
-!CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFEj"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
-!CHECK: %[[VAL_0:.*]] = fir.alloca i32 {bindc_name = "k", pinned, {{.*}}}
-!CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFEk"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
- integer :: i, j, k
- integer :: a, b, c
- integer :: x
-
- a=3
- b=2
- c=5
x=0
!CHECK: %[[VAL_24:.*]] = arith.constant 1 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-monotonic.f90 b/flang/test/Lower/OpenMP/wsloop-monotonic.f90
index 2a5cc2321c64..531d995052f6 100644
--- a/flang/test/Lower/OpenMP/wsloop-monotonic.f90
+++ b/flang/test/Lower/OpenMP/wsloop-monotonic.f90
@@ -11,7 +11,7 @@ program wsloop_dynamic
!CHECK: omp.parallel {
!$OMP DO SCHEDULE(monotonic:dynamic)
-!CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
+!CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned}
!CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32
!CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32
!CHECK: %[[WS_STEP:.*]] = arith.constant 1 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-nonmonotonic.f90 b/flang/test/Lower/OpenMP/wsloop-nonmonotonic.f90
index 1e71807ae5b1..420bc0bffaec 100644
--- a/flang/test/Lower/OpenMP/wsloop-nonmonotonic.f90
+++ b/flang/test/Lower/OpenMP/wsloop-nonmonotonic.f90
@@ -12,7 +12,7 @@ program wsloop_dynamic
!CHECK: omp.parallel {
!$OMP DO SCHEDULE(nonmonotonic:dynamic)
-!CHECK: %[[I_REF:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
+!CHECK: %[[I_REF:.*]] = fir.alloca i32 {{{.*}}, pinned}
!CHECK: %[[ALLOCA_IV:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32
!CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-add-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-add-byref.f90
index 8dc2b43ad56a..cc4d055b3ea1 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-add-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-add-byref.f90
@@ -77,7 +77,7 @@
! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
@@ -120,7 +120,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
@@ -164,7 +164,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
@@ -206,7 +206,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
@@ -257,7 +257,7 @@ end subroutine
! CHECK: %[[VAL_10:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_14:.*]] = arith.constant 100 : i32
@@ -321,7 +321,7 @@ end subroutine
! CHECK: %[[VAL_10:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_14:.*]] = arith.constant 100 : i32
@@ -392,7 +392,7 @@ end subroutine
! CHECK: %[[VAL_13:.*]] = arith.constant 0.000000e+00 : f64
! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_3]]#0 : f64, !fir.ref<f64>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_17:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir-byref.f90
index cef86d1c1bd4..ec9abba41db0 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir-byref.f90
@@ -26,7 +26,7 @@
! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir.f90
index d0ba2cdff817..c5cc5a95cef1 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-add-hlfir.f90
@@ -22,7 +22,7 @@
! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-add.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-add.f90
index cd7c362e3c0d..5b957959f40d 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-add.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-add.f90
@@ -53,7 +53,7 @@
! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
@@ -96,7 +96,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
@@ -140,7 +140,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
@@ -182,7 +182,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 100 : i32
@@ -233,7 +233,7 @@ end subroutine
! CHECK: %[[VAL_10:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_14:.*]] = arith.constant 100 : i32
@@ -297,7 +297,7 @@ end subroutine
! CHECK: %[[VAL_10:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_14:.*]] = arith.constant 100 : i32
@@ -368,7 +368,7 @@ end subroutine
! CHECK: %[[VAL_13:.*]] = arith.constant 0.000000e+00 : f64
! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_3]]#0 : f64, !fir.ref<f64>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_17:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90
index 13858ff2f34e..ccf186cf9908 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-allocatable.f90
@@ -75,7 +75,7 @@ end program
! CHECK: %[[VAL_8:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_8]] to %[[VAL_5]]#0 realloc : i32, !fir.ref<!fir.box<!fir.heap<i32>>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_9:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_9:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_10:.*]]:2 = hlfir.declare %[[VAL_9]] {uniq_name = "_QFEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_11:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_12:.*]] = arith.constant 10 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90
index 713cc2c0f02d..be1e3ec0cb9d 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-array-assumed-shape.f90
@@ -77,7 +77,7 @@ end program
! CHECK: %[[VAL_2:.*]]:2 = hlfir.declare %[[VAL_1]] {uniq_name = "_QFFreduceEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_0]] dummy_scope %{{[0-9]+}} {fortran_attrs = {{.*}}, uniq_name = "_QFFreduceEr"} : (!fir.box<!fir.array<?xf64>>, !fir.dscope) -> (!fir.box<!fir.array<?xf64>>, !fir.box<!fir.array<?xf64>>)
! CHECK: omp.parallel {
-! CHECK: %[[VAL_4:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_4:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFFreduceEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_7:.*]] = arith.constant 10 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array.f90
index 18a76ed1d5a8..add422f5d956 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-array.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-array.f90
@@ -71,7 +71,7 @@ end program
! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1>
! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_2]](%[[VAL_4]]) {uniq_name = "_QFEr"} : (!fir.ref<!fir.array<2xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<2xi32>>, !fir.ref<!fir.array<2xi32>>)
! CHECK: omp.parallel {
-! CHECK: %[[VAL_6:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_6:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_8:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_9:.*]] = arith.constant 10 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90
index 9c2cb862ba6c..e988567fc337 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-array2.f90
@@ -71,7 +71,7 @@ end program
! CHECK: %[[VAL_4:.*]] = fir.shape %[[VAL_3]] : (index) -> !fir.shape<1>
! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_2]](%[[VAL_4]]) {uniq_name = "_QFEr"} : (!fir.ref<!fir.array<2xi32>>, !fir.shape<1>) -> (!fir.ref<!fir.array<2xi32>>, !fir.ref<!fir.array<2xi32>>)
! CHECK: omp.parallel {
-! CHECK: %[[VAL_6:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_6:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_8:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_9:.*]] = arith.constant 10 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-iand-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-iand-byref.f90
index 8eb4f4c6eb4c..bcc71415f1dd 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-iand-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-iand-byref.f90
@@ -30,7 +30,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iandEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90
index 713e50f72e9d..ae771c692b98 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-iand.f90
@@ -24,7 +24,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iandEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-ieor-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-ieor-byref.f90
index 6a5d942cb74e..6825800b2cd8 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-ieor-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-ieor-byref.f90
@@ -26,7 +26,7 @@
!CHECK: omp.parallel
-!CHECK: %[[I_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+!CHECK: %[[I_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_ieorEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.wsloop reduction(byref @ieor_byref_i32 %[[X_DECL]]#0 -> %[[PRV:.+]] : !fir.ref<i32>)
!CHECK-NEXT: omp.loop_nest
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90
index 958cc6f87e4f..d50f6b854f48 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-ieor.f90
@@ -17,7 +17,7 @@
!CHECK: omp.parallel
-!CHECK: %[[I_REF:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+!CHECK: %[[I_REF:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
!CHECK: %[[I_DECL:.*]]:2 = hlfir.declare %[[I_REF]] {uniq_name = "_QFreduction_ieorEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
!CHECK: omp.wsloop reduction(@[[IEOR_DECLARE_I]] %[[X_DECL]]#0 -> %[[PRV:.+]] : !fir.ref<i32>)
!CHECK-NEXT: omp.loop_nest
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-ior-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-ior-byref.f90
index 2956cd9ef53c..00f4552917aa 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-ior-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-ior-byref.f90
@@ -28,7 +28,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iorEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90
index a80838047bac..182f1eaeeeb7 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-ior.f90
@@ -24,7 +24,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_iorEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-and-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-and-byref.f90
index b505585e5cb0..331232a08e62 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-and-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-and-byref.f90
@@ -37,7 +37,7 @@
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -87,7 +87,7 @@ end subroutine simple_reduction
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -146,7 +146,7 @@ end subroutine
! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90
index 65781d6c3652..078a463919e9 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-and.f90
@@ -31,7 +31,7 @@
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -81,7 +81,7 @@ end subroutine simple_reduction
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -140,7 +140,7 @@ end subroutine
! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv-byref.f90
index a103bf58e16b..d1cb7b05f0af 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv-byref.f90
@@ -37,7 +37,7 @@
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -86,7 +86,7 @@ end subroutine
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -145,7 +145,7 @@ end subroutine
! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90
index c77a2bef0dd2..8204e88815f3 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-eqv.f90
@@ -31,7 +31,7 @@
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -80,7 +80,7 @@ end subroutine
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -139,7 +139,7 @@ end subroutine
! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv-byref.f90
index 8abc9b61c42e..64c056c9c6bd 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv-byref.f90
@@ -37,7 +37,7 @@
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -87,7 +87,7 @@ end subroutine
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -148,7 +148,7 @@ end subroutine
! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90
index 28e821bb41ec..957de9b6741a 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-neqv.f90
@@ -31,7 +31,7 @@
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -81,7 +81,7 @@ end subroutine
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -142,7 +142,7 @@ end subroutine
! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-or-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-or-byref.f90
index e6def280cf70..1e5f91de801c 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-or-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-or-byref.f90
@@ -36,7 +36,7 @@
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -85,7 +85,7 @@ end subroutine
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -144,7 +144,7 @@ end subroutine
! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90
index 2453efe59e25..d77566b109e5 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-logical-or.f90
@@ -31,7 +31,7 @@
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -80,7 +80,7 @@ end subroutine
! CHECK: %[[VAL_9:.*]] = fir.convert %[[VAL_8]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_9]] to %[[VAL_4]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_10:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFsimple_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_12:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_13:.*]] = arith.constant 100 : i32
@@ -139,7 +139,7 @@ end subroutine
! CHECK: %[[VAL_17:.*]] = fir.convert %[[VAL_16]] : (i1) -> !fir.logical<4>
! CHECK: hlfir.assign %[[VAL_17]] to %[[VAL_11]]#0 : !fir.logical<4>, !fir.ref<!fir.logical<4>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_18:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFmultiple_reductionsEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_20:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_21:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max-byref.f90
index 018fb28c6f68..399afa106556 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-max-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-max-byref.f90
@@ -41,7 +41,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
@@ -72,7 +72,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
@@ -93,7 +93,7 @@
! CHECK: omp.terminator
! CHECK: omp.terminator
! CHECK: omp.parallel {
-! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_33:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir-byref.f90
index 130a580cd685..2f3aee9f1300 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir-byref.f90
@@ -28,7 +28,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir.f90
index 23e2ae98a027..71631fb14592 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-max-hlfir.f90
@@ -24,7 +24,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-max.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-max.f90
index 9a93c75f5bd1..d4e827f3b7e2 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-max.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-max.f90
@@ -35,7 +35,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
@@ -66,7 +66,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
@@ -87,7 +87,7 @@
! CHECK: omp.terminator
! CHECK: omp.terminator
! CHECK: omp.parallel {
-! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_max_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_33:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-min-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-min-byref.f90
index 754b3fd400d3..ccab017a1b0c 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-min-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-min-byref.f90
@@ -41,7 +41,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
@@ -72,7 +72,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
@@ -95,7 +95,7 @@
! CHECK: omp.terminator
! CHECK: }
! CHECK: omp.parallel {
-! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_33:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-min.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-min.f90
index 50b2db9463d2..80c056b5e8c5 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-min.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-min.f90
@@ -35,7 +35,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_intEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
@@ -66,7 +66,7 @@
! CHECK: %[[VAL_6:.*]] = arith.constant 0.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_6]] to %[[VAL_4]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_7:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_8:.*]]:2 = hlfir.declare %[[VAL_7]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_9:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_10:.*]] = arith.constant 100 : i32
@@ -89,7 +89,7 @@
! CHECK: omp.terminator
! CHECK: }
! CHECK: omp.parallel {
-! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_30:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_31:.*]]:2 = hlfir.declare %[[VAL_30]] {uniq_name = "_QFreduction_min_realEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_32:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_33:.*]] = arith.constant 100 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-min2.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-min2.f90
index 834fb03c1d14..a4c99f190dd2 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-min2.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-min2.f90
@@ -34,7 +34,7 @@ end program
! CHECK: %[[VAL_2:.*]] = fir.address_of(@_QFEr) : !fir.ref<i32>
! CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFEr"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: omp.parallel {
-! CHECK: %[[VAL_4:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_4:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_6:.*]] = arith.constant 0 : i32
! CHECK: %[[VAL_7:.*]] = arith.constant 10 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-mul-byref.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-mul-byref.f90
index eddb9112d3b0..762d9dd4f254 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-mul-byref.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-mul-byref.f90
@@ -80,7 +80,7 @@
! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
@@ -118,7 +118,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
@@ -157,7 +157,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
@@ -195,7 +195,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
@@ -242,7 +242,7 @@ end subroutine
! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32
! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32
@@ -302,7 +302,7 @@ end subroutine
! CHECK: %[[VAL_10:.*]] = arith.constant 1.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32
@@ -369,7 +369,7 @@ end subroutine
! CHECK: %[[VAL_13:.*]] = arith.constant 1.000000e+00 : f64
! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_3]]#0 : f64, !fir.ref<f64>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_17:.*]] = arith.constant 10 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90
index 573f1f431c18..08be4d84c1a6 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-mul.f90
@@ -55,7 +55,7 @@
! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
@@ -93,7 +93,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reductionEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
@@ -132,7 +132,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 1 : i32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_int_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
@@ -170,7 +170,7 @@ end subroutine
! CHECK: %[[VAL_4:.*]] = arith.constant 1.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_4]] to %[[VAL_3]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_5:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_6:.*]]:2 = hlfir.declare %[[VAL_5]] {uniq_name = "_QFsimple_real_reduction_switch_orderEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_7:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_8:.*]] = arith.constant 10 : i32
@@ -217,7 +217,7 @@ end subroutine
! CHECK: %[[VAL_10:.*]] = arith.constant 1 : i32
! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : i32, !fir.ref<i32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_int_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32
@@ -277,7 +277,7 @@ end subroutine
! CHECK: %[[VAL_10:.*]] = arith.constant 1.000000e+00 : f32
! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_7]]#0 : f32, !fir.ref<f32>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFmultiple_real_reductions_same_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32
@@ -344,7 +344,7 @@ end subroutine
! CHECK: %[[VAL_13:.*]] = arith.constant 1.000000e+00 : f64
! CHECK: hlfir.assign %[[VAL_13]] to %[[VAL_3]]#0 : f64, !fir.ref<f64>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_14:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFmultiple_reductions_different_typeEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_16:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_17:.*]] = arith.constant 10 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90 b/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90
index 3551b90474aa..ed462b58bbf8 100644
--- a/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90
+++ b/flang/test/Lower/OpenMP/wsloop-reduction-multiple-clauses.f90
@@ -107,7 +107,7 @@ endprogram
! CHECK: %[[VAL_10:.*]] = arith.constant 0.000000e+00 : f64
! CHECK: hlfir.assign %[[VAL_10]] to %[[VAL_4]]#0 : f64, !fir.ref<!fir.array<3x3xf64>>
! CHECK: omp.parallel {
-! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {bindc_name = "i", pinned, {{.*}}}
+! CHECK: %[[VAL_11:.*]] = fir.alloca i32 {adapt.valuebyref, pinned}
! CHECK: %[[VAL_12:.*]]:2 = hlfir.declare %[[VAL_11]] {uniq_name = "_QFEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[VAL_13:.*]] = arith.constant 1 : i32
! CHECK: %[[VAL_14:.*]] = arith.constant 10 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop-variable.f90 b/flang/test/Lower/OpenMP/wsloop-variable.f90
index dc2acf881f48..4d83b3328803 100644
--- a/flang/test/Lower/OpenMP/wsloop-variable.f90
+++ b/flang/test/Lower/OpenMP/wsloop-variable.f90
@@ -91,30 +91,26 @@ program wsloop_variable
end program wsloop_variable
!CHECK-LABEL: func.func @_QPwsloop_variable_sub() {
+!CHECK: %[[VAL_0:.*]] = fir.alloca i8 {adapt.valuebyref, pinned}
+!CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFwsloop_variable_subEi1"} : (!fir.ref<i8>) -> (!fir.ref<i8>, !fir.ref<i8>)
+!CHECK: %[[VAL_2:.*]] = fir.alloca i16 {adapt.valuebyref, pinned}
+!CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFwsloop_variable_subEi2"} : (!fir.ref<i16>) -> (!fir.ref<i16>, !fir.ref<i16>)
!CHECK: %[[VAL_4:.*]] = fir.alloca i8 {bindc_name = "i1", uniq_name = "_QFwsloop_variable_subEi1"}
!CHECK: %[[VAL_5:.*]]:2 = hlfir.declare %[[VAL_4]] {uniq_name = "_QFwsloop_variable_subEi1"} : (!fir.ref<i8>) -> (!fir.ref<i8>, !fir.ref<i8>)
-
!CHECK: %[[VAL_6:.*]] = fir.alloca i128 {bindc_name = "i16_lb", uniq_name = "_QFwsloop_variable_subEi16_lb"}
!CHECK: %[[VAL_7:.*]]:2 = hlfir.declare %[[VAL_6]] {uniq_name = "_QFwsloop_variable_subEi16_lb"} : (!fir.ref<i128>) -> (!fir.ref<i128>, !fir.ref<i128>)
-
!CHECK: %[[VAL_8:.*]] = fir.alloca i8 {bindc_name = "i1_ub", uniq_name = "_QFwsloop_variable_subEi1_ub"}
!CHECK: %[[VAL_9:.*]]:2 = hlfir.declare %[[VAL_8]] {uniq_name = "_QFwsloop_variable_subEi1_ub"} : (!fir.ref<i8>) -> (!fir.ref<i8>, !fir.ref<i8>)
-
!CHECK: %[[VAL_10:.*]] = fir.alloca i16 {bindc_name = "i2", uniq_name = "_QFwsloop_variable_subEi2"}
!CHECK: %[[VAL_11:.*]]:2 = hlfir.declare %[[VAL_10]] {uniq_name = "_QFwsloop_variable_subEi2"} : (!fir.ref<i16>) -> (!fir.ref<i16>, !fir.ref<i16>)
-
!CHECK: %[[VAL_12:.*]] = fir.alloca i16 {bindc_name = "i2_s", uniq_name = "_QFwsloop_variable_subEi2_s"}
!CHECK: %[[VAL_13:.*]]:2 = hlfir.declare %[[VAL_12]] {uniq_name = "_QFwsloop_variable_subEi2_s"} : (!fir.ref<i16>) -> (!fir.ref<i16>, !fir.ref<i16>)
-
!CHECK: %[[VAL_14:.*]] = fir.alloca i32 {bindc_name = "i4_s", uniq_name = "_QFwsloop_variable_subEi4_s"}
!CHECK: %[[VAL_15:.*]]:2 = hlfir.declare %[[VAL_14]] {uniq_name = "_QFwsloop_variable_subEi4_s"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
-
!CHECK: %[[VAL_16:.*]] = fir.alloca i64 {bindc_name = "i8", uniq_name = "_QFwsloop_variable_subEi8"}
!CHECK: %[[VAL_17:.*]]:2 = hlfir.declare %[[VAL_16]] {uniq_name = "_QFwsloop_variable_subEi8"} : (!fir.ref<i64>) -> (!fir.ref<i64>, !fir.ref<i64>)
-
!CHECK: %[[VAL_18:.*]] = fir.alloca i8 {bindc_name = "j1", uniq_name = "_QFwsloop_variable_subEj1"}
!CHECK: %[[VAL_19:.*]]:2 = hlfir.declare %[[VAL_18]] {uniq_name = "_QFwsloop_variable_subEj1"} : (!fir.ref<i8>) -> (!fir.ref<i8>, !fir.ref<i8>)
-
!CHECK: %[[VAL_20:.*]] = fir.alloca f32 {bindc_name = "x", uniq_name = "_QFwsloop_variable_subEx"}
!CHECK: %[[VAL_21:.*]]:2 = hlfir.declare %[[VAL_20]] {uniq_name = "_QFwsloop_variable_subEx"} : (!fir.ref<f32>) -> (!fir.ref<f32>, !fir.ref<f32>)
@@ -126,9 +122,6 @@ subroutine wsloop_variable_sub
integer(kind=16) :: i16_lb
real :: x
-!CHECK: %[[VAL_2:.*]] = fir.alloca i16 {bindc_name = "i2", pinned, {{.*}}}
-!CHECK: %[[VAL_3:.*]]:2 = hlfir.declare %[[VAL_2]] {uniq_name = "_QFwsloop_variable_subEi2"} : (!fir.ref<i16>) -> (!fir.ref<i16>, !fir.ref<i16>)
-
!CHECK: %[[VAL_22:.*]] = arith.constant 1 : i32
!CHECK: %[[VAL_23:.*]] = fir.load %[[VAL_9]]#0 : !fir.ref<i8>
!CHECK: %[[VAL_24:.*]] = fir.load %[[VAL_13]]#0 : !fir.ref<i16>
@@ -173,12 +166,9 @@ subroutine wsloop_variable_sub
end do
!$omp end do
+
!CHECK: %[[VAL_49:.*]] = arith.constant 5 : i8
!CHECK: hlfir.assign %[[VAL_49]] to %[[VAL_19]]#0 : i8, !fir.ref<i8>
-
-!CHECK: %[[VAL_0:.*]] = fir.alloca i8 {bindc_name = "i1", pinned, {{.*}}}
-!CHECK: %[[VAL_1:.*]]:2 = hlfir.declare %[[VAL_0]] {uniq_name = "_QFwsloop_variable_subEi1"} : (!fir.ref<i8>) -> (!fir.ref<i8>, !fir.ref<i8>)
-
!CHECK: %[[VAL_50:.*]] = arith.constant 1 : i32
!CHECK: %[[VAL_51:.*]] = arith.constant 10 : i32
!CHECK: %[[VAL_52:.*]] = arith.constant 1 : i32
diff --git a/flang/test/Lower/OpenMP/wsloop.f90 b/flang/test/Lower/OpenMP/wsloop.f90
index 6536f5b83d2c..da90cb724159 100644
--- a/flang/test/Lower/OpenMP/wsloop.f90
+++ b/flang/test/Lower/OpenMP/wsloop.f90
@@ -7,7 +7,7 @@ subroutine simple_loop
integer :: i
! CHECK: omp.parallel
!$OMP PARALLEL
- ! CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
+ ! CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned}
! CHECK: %[[IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loopEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32
! CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32
@@ -33,7 +33,7 @@ subroutine simple_loop_with_step
integer :: i
! CHECK: omp.parallel
!$OMP PARALLEL
- ! CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
+ ! CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned}
! CHECK: %[[IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFsimple_loop_with_stepEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32
! CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32
@@ -59,7 +59,7 @@ subroutine loop_with_schedule_nowait
integer :: i
! CHECK: omp.parallel
!$OMP PARALLEL
- ! CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned, {{.*}}}
+ ! CHECK: %[[ALLOCA_IV:.*]] = fir.alloca i32 {{{.*}}, pinned}
! CHECK: %[[IV_DECL:.*]]:2 = hlfir.declare %[[ALLOCA_IV]] {uniq_name = "_QFloop_with_schedule_nowaitEi"} : (!fir.ref<i32>) -> (!fir.ref<i32>, !fir.ref<i32>)
! CHECK: %[[WS_LB:.*]] = arith.constant 1 : i32
! CHECK: %[[WS_UB:.*]] = arith.constant 9 : i32
diff --git a/flang/test/Lower/branching-directive.f90 b/flang/test/Lower/branching-directive.f90
index a0a147f1053a..69270d7bcbe9 100644
--- a/flang/test/Lower/branching-directive.f90
+++ b/flang/test/Lower/branching-directive.f90
@@ -1,25 +1,88 @@
-!RUN: flang-new -fc1 -emit-hlfir -fopenmp -o - %s | FileCheck %s
+!RUN: bbc -emit-hlfir -fopenacc -fopenmp -o - %s | FileCheck %s
!https://github.com/llvm/llvm-project/issues/91526
+!CHECK-LABEL: func.func @_QPsimple1
!CHECK: cf.cond_br %{{[0-9]+}}, ^bb[[THEN:[0-9]+]], ^bb[[ELSE:[0-9]+]]
!CHECK: ^bb[[THEN]]:
-!CHECK: cf.br ^bb[[EXIT:[0-9]+]]
+!CHECK: omp.parallel
+!CHECK: cf.br ^bb[[ENDIF:[0-9]+]]
!CHECK: ^bb[[ELSE]]:
!CHECK: fir.call @_FortranAStopStatement
!CHECK: fir.unreachable
-!CHECK: ^bb[[EXIT]]:
+!CHECK: ^bb[[ENDIF]]:
+!CHECK: return
-subroutine simple(y)
+subroutine simple1(y)
implicit none
logical, intent(in) :: y
integer :: i
if (y) then
-!$omp parallel
+ !$omp parallel
i = 1
-!$omp end parallel
+ !$omp end parallel
else
stop 1
end if
-end subroutine simple
+end subroutine
+
+!CHECK-LABEL: func.func @_QPsimple2
+!CHECK: cf.cond_br %{{[0-9]+}}, ^bb[[THEN:[0-9]+]], ^bb[[ELSE:[0-9]+]]
+!CHECK: ^bb[[THEN]]:
+!CHECK: omp.parallel
+!CHECK: cf.br ^bb[[ENDIF:[0-9]+]]
+!CHECK: ^bb[[ELSE]]:
+!CHECK: fir.call @_FortranAStopStatement
+!CHECK: fir.unreachable
+!CHECK: ^bb[[ENDIF]]:
+!CHECK: fir.call @_FortranAioOutputReal64
+!CHECK: return
+subroutine simple2(x, yn)
+ implicit none
+ logical, intent(in) :: yn
+ integer, intent(in) :: x
+ integer :: i
+ real(8) :: E
+ E = 0d0
+
+ if (yn) then
+ !$omp parallel do private(i) reduction(+:E)
+ do i = 1, x
+ E = E + i
+ end do
+ !$omp end parallel do
+ else
+ stop 1
+ end if
+ print *, E
+end subroutine
+
+!CHECK-LABEL: func.func @_QPacccase
+!CHECK: fir.select_case %{{[0-9]+}} : i32 [{{.*}}, ^bb[[CASE1:[0-9]+]], {{.*}}, ^bb[[CASE2:[0-9]+]], {{.*}}, ^bb[[CASE3:[0-9]+]]]
+!CHECK: ^bb[[CASE1]]:
+!CHECK: acc.serial
+!CHECK: cf.br ^bb[[EXIT:[0-9]+]]
+!CHECK: ^bb[[CASE2]]:
+!CHECK: fir.call @_FortranAioOutputAscii
+!CHECK: cf.br ^bb[[EXIT]]
+!CHECK: ^bb[[CASE3]]:
+!CHECK: fir.call @_FortranAioOutputAscii
+!CHECK: cf.br ^bb[[EXIT]]
+!CHECK: ^bb[[EXIT]]:
+!CHECK: return
+subroutine acccase(var)
+ integer :: var
+ integer :: res(10)
+ select case (var)
+ case (1)
+ print *, "case 1"
+ !$acc serial
+ res(1) = 1
+ !$acc end serial
+ case (2)
+ print *, "case 2"
+ case default
+ print *, "case default"
+ end select
+end subroutine
diff --git a/flang/test/Lower/unstructured-control-flow.f90 b/flang/test/Lower/unstructured-control-flow.f90
new file mode 100644
index 000000000000..310293381e5f
--- /dev/null
+++ b/flang/test/Lower/unstructured-control-flow.f90
@@ -0,0 +1,31 @@
+!RUN: bbc -emit-hlfir -o - %s | FileCheck %s
+
+!CHECK-LABEL: func.func @_QPunstructured1
+!CHECK: fir.select %{{[0-9]+}} : i32 [{{.*}}, ^bb[[BLOCK3:[0-9]+]], {{.*}}, ^bb[[BLOCK4:[0-9]+]], {{.*}}, ^bb[[BLOCK5:[0-9]+]], {{.*}}, ^bb[[BLOCK1:[0-9]+]]]
+!CHECK: ^bb[[BLOCK1]]:
+!CHECK: cf.cond_br %{{[0-9]+}}, ^bb[[BLOCK2:[0-9]+]], ^bb[[BLOCK4]]
+!CHECK: ^bb[[BLOCK2]]:
+!CHECK: fir.if
+!CHECK: cf.br ^bb[[BLOCK3]]
+!CHECK: ^bb[[BLOCK3]]:
+!CHECK: %[[C10:[a-z0-9_]+]] = arith.constant 10 : i32
+!CHECK: arith.addi {{.*}}, %[[C10]]
+!CHECK: cf.br ^bb[[BLOCK4]]
+!CHECK: ^bb[[BLOCK4]]:
+!CHECK: %[[C100:[a-z0-9_]+]] = arith.constant 100 : i32
+!CHECK: arith.addi {{.*}}, %[[C100]]
+!CHECK: cf.br ^bb[[BLOCK5]]
+!CHECK: ^bb[[BLOCK5]]:
+!CHECK: %[[C1000:[a-z0-9_]+]] = arith.constant 1000 : i32
+!CHECK: arith.addi {{.*}}, %[[C1000]]
+!CHECK: return
+subroutine unstructured1(j, k)
+ goto (11, 22, 33) j-3 ! computed goto - an expression outside [1,3] is a nop
+ if (j == 2) goto 22
+ if (j == 1) goto 11
+ k = k + 1
+11 k = k + 10
+22 k = k + 100
+33 k = k + 1000
+end
+
diff --git a/flang/test/Parser/OpenMP/masked-unparse.f90 b/flang/test/Parser/OpenMP/masked-unparse.f90
new file mode 100644
index 000000000000..16d7ca68e3e1
--- /dev/null
+++ b/flang/test/Parser/OpenMP/masked-unparse.f90
@@ -0,0 +1,92 @@
+! RUN: %flang_fc1 -fdebug-unparse -fopenmp %s | FileCheck --ignore-case %s
+! RUN: %flang_fc1 -fdebug-dump-parse-tree -fopenmp %s | FileCheck --check-prefix="PARSE-TREE" %s
+
+! Check for parsing of masked directive with filter clause.
+
+
+subroutine test_masked()
+ integer :: c = 1
+ !PARSE-TREE: OmpBeginBlockDirective
+ !PARSE-TREE-NEXT: OmpBlockDirective -> llvm::omp::Directive = masked
+ !CHECK: !$omp masked
+ !$omp masked
+ c = c + 1
+ !$omp end masked
+ !PARSE-TREE: OmpBeginBlockDirective
+ !PARSE-TREE-NEXT: OmpBlockDirective -> llvm::omp::Directive = masked
+ !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Filter -> Scalar -> Integer -> Expr = '1_4'
+ !PARSE-TREE-NEXT: LiteralConstant -> IntLiteralConstant = '1'
+ !CHECK: !$omp masked filter(1_4)
+ !$omp masked filter(1)
+ c = c + 2
+ !$omp end masked
+end subroutine
+
+subroutine test_masked_taskloop_simd()
+ integer :: i, j = 1
+ !PARSE-TREE: OmpBeginLoopDirective
+ !PARSE-TREE-NEXT: OmpLoopDirective -> llvm::omp::Directive = masked taskloop simd
+ !CHECK: !$omp masked taskloop simd
+ !$omp masked taskloop simd
+ do i=1,10
+ j = j + 1
+ end do
+ !$omp end masked taskloop simd
+end subroutine
+
+subroutine test_masked_taskloop
+ integer :: i, j = 1
+ !PARSE-TREE: OmpBeginLoopDirective
+ !PARSE-TREE-NEXT: OmpLoopDirective -> llvm::omp::Directive = masked taskloop
+ !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Filter -> Scalar -> Integer -> Expr = '2_4'
+ !PARSE-TREE-NEXT: LiteralConstant -> IntLiteralConstant = '2'
+ !CHECK: !$omp masked taskloop filter(2_4)
+ !$omp masked taskloop filter(2)
+ do i=1,10
+ j = j + 1
+ end do
+ !$omp end masked taskloop
+end subroutine
+
+subroutine test_parallel_masked
+ integer, parameter :: i = 1, j = 1
+ integer :: c = 2
+ !PARSE-TREE: OmpBeginBlockDirective
+ !PARSE-TREE-NEXT: OmpBlockDirective -> llvm::omp::Directive = parallel masked
+ !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Filter -> Scalar -> Integer -> Expr = '2_4'
+ !PARSE-TREE-NEXT: Add
+ !PARSE-TREE-NEXT: Expr = '1_4'
+ !PARSE-TREE-NEXT: Designator -> DataRef -> Name = 'i'
+ !PARSE-TREE-NEXT: Expr = '1_4'
+ !PARSE-TREE-NEXT: Designator -> DataRef -> Name = 'j'
+ !CHECK: !$omp parallel masked filter(2_4)
+ !$omp parallel masked filter(i+j)
+ c = c + 2
+ !$omp end parallel masked
+end subroutine
+
+subroutine test_parallel_masked_taskloop_simd
+ integer :: i, j = 1
+ !PARSE-TREE: OmpBeginLoopDirective
+ !PARSE-TREE-NEXT: OmpLoopDirective -> llvm::omp::Directive = parallel masked taskloop simd
+ !CHECK: !$omp parallel masked taskloop simd
+ !$omp parallel masked taskloop simd
+ do i=1,10
+ j = j + 1
+ end do
+ !$omp end parallel masked taskloop simd
+end subroutine
+
+subroutine test_parallel_masked_taskloop
+ integer :: i, j = 1
+ !PARSE-TREE: OmpBeginLoopDirective
+ !PARSE-TREE-NEXT: OmpLoopDirective -> llvm::omp::Directive = parallel masked taskloop
+ !PARSE-TREE-NEXT: OmpClauseList -> OmpClause -> Filter -> Scalar -> Integer -> Expr = '2_4'
+ !PARSE-TREE-NEXT: LiteralConstant -> IntLiteralConstant = '2'
+ !CHECK: !$omp parallel masked taskloop filter(2_4)
+ !$omp parallel masked taskloop filter(2)
+ do i=1,10
+ j = j + 1
+ end do
+ !$omp end parallel masked taskloop
+end subroutine
diff --git a/flang/test/Semantics/OpenMP/allocate-clause01.f90 b/flang/test/Semantics/OpenMP/allocate-clause01.f90
index 2b9a72e928eb..486166ec6338 100644
--- a/flang/test/Semantics/OpenMP/allocate-clause01.f90
+++ b/flang/test/Semantics/OpenMP/allocate-clause01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocate-directive.f90 b/flang/test/Semantics/OpenMP/allocate-directive.f90
index 18a14b825f00..f55b724980fb 100644
--- a/flang/test/Semantics/OpenMP/allocate-directive.f90
+++ b/flang/test/Semantics/OpenMP/allocate-directive.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocate01.f90 b/flang/test/Semantics/OpenMP/allocate01.f90
index 6ccb8bb09e83..a3d5fb5f90cd 100644
--- a/flang/test/Semantics/OpenMP/allocate01.f90
+++ b/flang/test/Semantics/OpenMP/allocate01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocate02.f90 b/flang/test/Semantics/OpenMP/allocate02.f90
index 8f0579e810bb..b9bfdbe55aa2 100644
--- a/flang/test/Semantics/OpenMP/allocate02.f90
+++ b/flang/test/Semantics/OpenMP/allocate02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocate03.f90 b/flang/test/Semantics/OpenMP/allocate03.f90
index e35115f3897c..ce577c857985 100644
--- a/flang/test/Semantics/OpenMP/allocate03.f90
+++ b/flang/test/Semantics/OpenMP/allocate03.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocate04.f90 b/flang/test/Semantics/OpenMP/allocate04.f90
index ea89d9446cc1..37f180cc16aa 100644
--- a/flang/test/Semantics/OpenMP/allocate04.f90
+++ b/flang/test/Semantics/OpenMP/allocate04.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocate05.f90 b/flang/test/Semantics/OpenMP/allocate05.f90
index a787e8bb32a4..c4e0ace988bd 100644
--- a/flang/test/Semantics/OpenMP/allocate05.f90
+++ b/flang/test/Semantics/OpenMP/allocate05.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocate06.f90 b/flang/test/Semantics/OpenMP/allocate06.f90
index e14134cd0730..e25b4c4decd5 100644
--- a/flang/test/Semantics/OpenMP/allocate06.f90
+++ b/flang/test/Semantics/OpenMP/allocate06.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocate07.f90 b/flang/test/Semantics/OpenMP/allocate07.f90
index 396df598b252..2b0f17647b3c 100644
--- a/flang/test/Semantics/OpenMP/allocate07.f90
+++ b/flang/test/Semantics/OpenMP/allocate07.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocate08.f90 b/flang/test/Semantics/OpenMP/allocate08.f90
index fc950ea4fca3..82aa11d69cfc 100644
--- a/flang/test/Semantics/OpenMP/allocate08.f90
+++ b/flang/test/Semantics/OpenMP/allocate08.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocate09.f90 b/flang/test/Semantics/OpenMP/allocate09.f90
index 0f93a340fe1e..3664c34c7e43 100644
--- a/flang/test/Semantics/OpenMP/allocate09.f90
+++ b/flang/test/Semantics/OpenMP/allocate09.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocators01.f90 b/flang/test/Semantics/OpenMP/allocators01.f90
index c75c522ecae1..f10db35f96d9 100644
--- a/flang/test/Semantics/OpenMP/allocators01.f90
+++ b/flang/test/Semantics/OpenMP/allocators01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocators02.f90 b/flang/test/Semantics/OpenMP/allocators02.f90
index 8055d21c6809..7f8fa3600277 100644
--- a/flang/test/Semantics/OpenMP/allocators02.f90
+++ b/flang/test/Semantics/OpenMP/allocators02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocators03.f90 b/flang/test/Semantics/OpenMP/allocators03.f90
index 03cff1b1e991..050cc2051c99 100644
--- a/flang/test/Semantics/OpenMP/allocators03.f90
+++ b/flang/test/Semantics/OpenMP/allocators03.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocators04.f90 b/flang/test/Semantics/OpenMP/allocators04.f90
index 1d2e96443a9d..3c84030c4e39 100644
--- a/flang/test/Semantics/OpenMP/allocators04.f90
+++ b/flang/test/Semantics/OpenMP/allocators04.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocators05.f90 b/flang/test/Semantics/OpenMP/allocators05.f90
index d0e11ca5874d..8fd80b033756 100644
--- a/flang/test/Semantics/OpenMP/allocators05.f90
+++ b/flang/test/Semantics/OpenMP/allocators05.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/allocators06.f90 b/flang/test/Semantics/OpenMP/allocators06.f90
index a975204c1133..881182caa9b3 100644
--- a/flang/test/Semantics/OpenMP/allocators06.f90
+++ b/flang/test/Semantics/OpenMP/allocators06.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/atomic-hint-clause.f90 b/flang/test/Semantics/OpenMP/atomic-hint-clause.f90
index e157b7e1e73a..9050cbb0dca6 100644
--- a/flang/test/Semantics/OpenMP/atomic-hint-clause.f90
+++ b/flang/test/Semantics/OpenMP/atomic-hint-clause.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/atomic.f90 b/flang/test/Semantics/OpenMP/atomic.f90
index 44f06b7460bf..2f270ce33338 100644
--- a/flang/test/Semantics/OpenMP/atomic.f90
+++ b/flang/test/Semantics/OpenMP/atomic.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
use omp_lib
! Check OpenMP 2.13.6 atomic Construct
diff --git a/flang/test/Semantics/OpenMP/atomic01.f90 b/flang/test/Semantics/OpenMP/atomic01.f90
index f0e1b47d2fa1..6ec94f3ff3a4 100644
--- a/flang/test/Semantics/OpenMP/atomic01.f90
+++ b/flang/test/Semantics/OpenMP/atomic01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/atomic02.f90 b/flang/test/Semantics/OpenMP/atomic02.f90
index b823bc4c33b2..92f2c4b9d040 100644
--- a/flang/test/Semantics/OpenMP/atomic02.f90
+++ b/flang/test/Semantics/OpenMP/atomic02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/atomic03.f90 b/flang/test/Semantics/OpenMP/atomic03.f90
index 76367495b986..4cce71dba351 100644
--- a/flang/test/Semantics/OpenMP/atomic03.f90
+++ b/flang/test/Semantics/OpenMP/atomic03.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/atomic04.f90 b/flang/test/Semantics/OpenMP/atomic04.f90
index a9644ad95aa3..c03b230c837a 100644
--- a/flang/test/Semantics/OpenMP/atomic04.f90
+++ b/flang/test/Semantics/OpenMP/atomic04.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/atomic05.f90 b/flang/test/Semantics/OpenMP/atomic05.f90
index 2d9566463309..cfba33968213 100644
--- a/flang/test/Semantics/OpenMP/atomic05.f90
+++ b/flang/test/Semantics/OpenMP/atomic05.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/barrier.f90 b/flang/test/Semantics/OpenMP/barrier.f90
index 1483fbd08f95..5fc3f7f3bd70 100644
--- a/flang/test/Semantics/OpenMP/barrier.f90
+++ b/flang/test/Semantics/OpenMP/barrier.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
!$omp barrier
diff --git a/flang/test/Semantics/OpenMP/clause-validity01.f90 b/flang/test/Semantics/OpenMP/clause-validity01.f90
index 22ac57065ffe..779be00b9eba 100644
--- a/flang/test/Semantics/OpenMP/clause-validity01.f90
+++ b/flang/test/Semantics/OpenMP/clause-validity01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags %openmp_module_flag
diff --git a/flang/test/Semantics/OpenMP/combined-constructs.f90 b/flang/test/Semantics/OpenMP/combined-constructs.f90
index 35ab6fcac58b..ba504d1b8e22 100644
--- a/flang/test/Semantics/OpenMP/combined-constructs.f90
+++ b/flang/test/Semantics/OpenMP/combined-constructs.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
program main
diff --git a/flang/test/Semantics/OpenMP/common-block.f90 b/flang/test/Semantics/OpenMP/common-block.f90
index e1ddd120da85..4ddc5474a628 100644
--- a/flang/test/Semantics/OpenMP/common-block.f90
+++ b/flang/test/Semantics/OpenMP/common-block.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %flang_fc1 -fopenmp -fdebug-dump-symbols %s | FileCheck %s
program main
diff --git a/flang/test/Semantics/OpenMP/compiler-directive.f90 b/flang/test/Semantics/OpenMP/compiler-directive.f90
index 5d3e9bae27fd..07363ac5ac1e 100644
--- a/flang/test/Semantics/OpenMP/compiler-directive.f90
+++ b/flang/test/Semantics/OpenMP/compiler-directive.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! CompilerDirective with openmp tests
diff --git a/flang/test/Semantics/OpenMP/copyin01.f90 b/flang/test/Semantics/OpenMP/copyin01.f90
index 0051b5d441f0..387a9fc7cf0b 100644
--- a/flang/test/Semantics/OpenMP/copyin01.f90
+++ b/flang/test/Semantics/OpenMP/copyin01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.15.4.1 copyin Clause
diff --git a/flang/test/Semantics/OpenMP/copyin02.f90 b/flang/test/Semantics/OpenMP/copyin02.f90
index 09b876677ea3..92512890e3ed 100644
--- a/flang/test/Semantics/OpenMP/copyin02.f90
+++ b/flang/test/Semantics/OpenMP/copyin02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.15.4.1 copyin Clause
diff --git a/flang/test/Semantics/OpenMP/copyin03.f90 b/flang/test/Semantics/OpenMP/copyin03.f90
index 7c3759aa2e11..5c0a2e873d81 100644
--- a/flang/test/Semantics/OpenMP/copyin03.f90
+++ b/flang/test/Semantics/OpenMP/copyin03.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.15.4.1 copyin Clause
diff --git a/flang/test/Semantics/OpenMP/copyin04.f90 b/flang/test/Semantics/OpenMP/copyin04.f90
index 6f5e8dfef217..7cbee5f4afab 100644
--- a/flang/test/Semantics/OpenMP/copyin04.f90
+++ b/flang/test/Semantics/OpenMP/copyin04.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.15.4.1 copyin Clause
diff --git a/flang/test/Semantics/OpenMP/copyin05.f90 b/flang/test/Semantics/OpenMP/copyin05.f90
index 142d5a7345c6..aec6a7f88070 100644
--- a/flang/test/Semantics/OpenMP/copyin05.f90
+++ b/flang/test/Semantics/OpenMP/copyin05.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.15.4.1 copyin Clause
diff --git a/flang/test/Semantics/OpenMP/copying.f90 b/flang/test/Semantics/OpenMP/copying.f90
index 63fb39a0f26e..d56d2b8932cf 100644
--- a/flang/test/Semantics/OpenMP/copying.f90
+++ b/flang/test/Semantics/OpenMP/copying.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp -Werror -pedantic
! OpenMP Version 5.0
! 2.19.4.4 firstprivate Clause
diff --git a/flang/test/Semantics/OpenMP/copyprivate01.f90 b/flang/test/Semantics/OpenMP/copyprivate01.f90
index d5cf27347607..4920d7abbe7c 100644
--- a/flang/test/Semantics/OpenMP/copyprivate01.f90
+++ b/flang/test/Semantics/OpenMP/copyprivate01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 4.5
! 2.15.4.2 copyprivate Clause
diff --git a/flang/test/Semantics/OpenMP/copyprivate02.f90 b/flang/test/Semantics/OpenMP/copyprivate02.f90
index 35fd6dddd20c..2157cd4cb558 100644
--- a/flang/test/Semantics/OpenMP/copyprivate02.f90
+++ b/flang/test/Semantics/OpenMP/copyprivate02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 4.5
! 2.15.4.2 copyprivate Clause
diff --git a/flang/test/Semantics/OpenMP/copyprivate03.f90 b/flang/test/Semantics/OpenMP/copyprivate03.f90
index 9d39fdb6b13c..f1433ced8aac 100644
--- a/flang/test/Semantics/OpenMP/copyprivate03.f90
+++ b/flang/test/Semantics/OpenMP/copyprivate03.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 4.5
! 2.15.4.2 copyprivate Clause
diff --git a/flang/test/Semantics/OpenMP/critical-empty.f90 b/flang/test/Semantics/OpenMP/critical-empty.f90
index 2001c8a14a7b..706f6d806f55 100644
--- a/flang/test/Semantics/OpenMP/critical-empty.f90
+++ b/flang/test/Semantics/OpenMP/critical-empty.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! Test that there are no errors for an empty critical construct
diff --git a/flang/test/Semantics/OpenMP/critical-hint-clause.f90 b/flang/test/Semantics/OpenMP/critical-hint-clause.f90
index 419187fa3bbf..d737d671973c 100644
--- a/flang/test/Semantics/OpenMP/critical-hint-clause.f90
+++ b/flang/test/Semantics/OpenMP/critical-hint-clause.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/do02.f90 b/flang/test/Semantics/OpenMP/do02.f90
deleted file mode 100644
index 9749991e4f96..000000000000
--- a/flang/test/Semantics/OpenMP/do02.f90
+++ /dev/null
@@ -1,21 +0,0 @@
-! RUN: %S/test_errors.sh %s %t %flang -fopenmp
-! XFAIL: *
-
-! OpenMP Version 4.5
-! 2.7.1 Loop Construct
-! Exit statement terminating !$OMP DO loop
-
-program omp_do
- integer i, j, k
-
- !$omp do
- do i = 1, 10
- do j = 1, 10
- print *, "Hello"
- end do
- !ERROR: EXIT statement terminating !$OMP DO loop
- exit
- end do
- !$omp end do
-
-end program omp_do
diff --git a/flang/test/Semantics/OpenMP/masked.f90 b/flang/test/Semantics/OpenMP/masked.f90
new file mode 100644
index 000000000000..1113853ee8a9
--- /dev/null
+++ b/flang/test/Semantics/OpenMP/masked.f90
@@ -0,0 +1,13 @@
+! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
+
+subroutine test_masked()
+ integer :: c = 1
+ !ERROR: At most one FILTER clause can appear on the MASKED directive
+ !$omp masked filter(1) filter(2)
+ c = c + 1
+ !$omp end masked
+ !ERROR: NOWAIT clause is not allowed on the MASKED directive
+ !$omp masked nowait
+ c = c + 2
+ !$omp end masked
+end subroutine
diff --git a/flang/test/Semantics/OpenMP/sections01.f90 b/flang/test/Semantics/OpenMP/sections01.f90
index c26cc88dcc7a..00b5a6d8fbc4 100644
--- a/flang/test/Semantics/OpenMP/sections01.f90
+++ b/flang/test/Semantics/OpenMP/sections01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
diff --git a/flang/test/Semantics/OpenMP/sections02.f90 b/flang/test/Semantics/OpenMP/sections02.f90
index ee29922a72c0..912e7bc2a8ff 100644
--- a/flang/test/Semantics/OpenMP/sections02.f90
+++ b/flang/test/Semantics/OpenMP/sections02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/sections03.f90 b/flang/test/Semantics/OpenMP/sections03.f90
index 69775013ea82..b170f8674d19 100644
--- a/flang/test/Semantics/OpenMP/sections03.f90
+++ b/flang/test/Semantics/OpenMP/sections03.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
!XFAIL: *
! OpenMP version 5.0.0
diff --git a/flang/test/Semantics/OpenMP/simd-aligned.f90 b/flang/test/Semantics/OpenMP/simd-aligned.f90
index 0a9f95833e22..3ffdc68693fd 100644
--- a/flang/test/Semantics/OpenMP/simd-aligned.f90
+++ b/flang/test/Semantics/OpenMP/simd-aligned.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
diff --git a/flang/test/Semantics/OpenMP/simd-nontemporal.f90 b/flang/test/Semantics/OpenMP/simd-nontemporal.f90
index a488edd98cdc..074b0a2039ed 100644
--- a/flang/test/Semantics/OpenMP/simd-nontemporal.f90
+++ b/flang/test/Semantics/OpenMP/simd-nontemporal.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
diff --git a/flang/test/Semantics/OpenMP/simd01.f90 b/flang/test/Semantics/OpenMP/simd01.f90
index 1aa2880cda83..1e241648f75a 100644
--- a/flang/test/Semantics/OpenMP/simd01.f90
+++ b/flang/test/Semantics/OpenMP/simd01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 5.0
! 2.9.3.1 simd Construct
diff --git a/flang/test/Semantics/OpenMP/simd02.f90 b/flang/test/Semantics/OpenMP/simd02.f90
index a627e2ac2d67..24d6abd9761f 100644
--- a/flang/test/Semantics/OpenMP/simd02.f90
+++ b/flang/test/Semantics/OpenMP/simd02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
diff --git a/flang/test/Semantics/OpenMP/simd03.f90 b/flang/test/Semantics/OpenMP/simd03.f90
index 38f45da47748..8df48368fa96 100644
--- a/flang/test/Semantics/OpenMP/simd03.f90
+++ b/flang/test/Semantics/OpenMP/simd03.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %S/test_errors.sh %s %t %flang -fopenmp
! XFAIL: *
diff --git a/flang/test/Semantics/OpenMP/single01.f90 b/flang/test/Semantics/OpenMP/single01.f90
index 2e40bec56e9c..0468e695d8cf 100644
--- a/flang/test/Semantics/OpenMP/single01.f90
+++ b/flang/test/Semantics/OpenMP/single01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 4.5
! 2.7.3 single Construct
diff --git a/flang/test/Semantics/OpenMP/single02.f90 b/flang/test/Semantics/OpenMP/single02.f90
index 03cf7fbb6ad3..9d9d306c2f53 100644
--- a/flang/test/Semantics/OpenMP/single02.f90
+++ b/flang/test/Semantics/OpenMP/single02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 4.5
! 2.7.3 single Construct
diff --git a/flang/test/Semantics/OpenMP/struct.f90 b/flang/test/Semantics/OpenMP/struct.f90
index 8ae1fbe4da86..3d2000aef993 100644
--- a/flang/test/Semantics/OpenMP/struct.f90
+++ b/flang/test/Semantics/OpenMP/struct.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! Check OpenMP compatibility with the DEC STRUCTURE extension
diff --git a/flang/test/Semantics/OpenMP/symbol01.f90 b/flang/test/Semantics/OpenMP/symbol01.f90
index 0b435a9ab985..e2a9c01e9d5f 100644
--- a/flang/test/Semantics/OpenMP/symbol01.f90
+++ b/flang/test/Semantics/OpenMP/symbol01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_symbols.py %s %flang_fc1 -fopenmp
! Test clauses that accept list.
diff --git a/flang/test/Semantics/OpenMP/symbol02.f90 b/flang/test/Semantics/OpenMP/symbol02.f90
index f6ffc5500d0a..1b1dc4489448 100644
--- a/flang/test/Semantics/OpenMP/symbol02.f90
+++ b/flang/test/Semantics/OpenMP/symbol02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_symbols.py %s %flang_fc1 -fopenmp
! 1.4.1 Structure of the OpenMP Memory Model
diff --git a/flang/test/Semantics/OpenMP/symbol03.f90 b/flang/test/Semantics/OpenMP/symbol03.f90
index 93e9b7a3eae6..76d93577d3ac 100644
--- a/flang/test/Semantics/OpenMP/symbol03.f90
+++ b/flang/test/Semantics/OpenMP/symbol03.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_symbols.py %s %flang_fc1 -fopenmp
! 1.4.1 Structure of the OpenMP Memory Model
diff --git a/flang/test/Semantics/OpenMP/symbol04.f90 b/flang/test/Semantics/OpenMP/symbol04.f90
index 808d1e0dd09b..8ef154ebbf9d 100644
--- a/flang/test/Semantics/OpenMP/symbol04.f90
+++ b/flang/test/Semantics/OpenMP/symbol04.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_symbols.py %s %flang_fc1 -fopenmp
! 2.15.3 Data-Sharing Attribute Clauses
diff --git a/flang/test/Semantics/OpenMP/symbol05.f90 b/flang/test/Semantics/OpenMP/symbol05.f90
index fa0a8f65a429..d08d85270380 100644
--- a/flang/test/Semantics/OpenMP/symbol05.f90
+++ b/flang/test/Semantics/OpenMP/symbol05.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_symbols.py %s %flang_fc1 -fopenmp
! 2.15.2 threadprivate Directive
diff --git a/flang/test/Semantics/OpenMP/symbol06.f90 b/flang/test/Semantics/OpenMP/symbol06.f90
index 906264eb1264..a2cd288dfd15 100644
--- a/flang/test/Semantics/OpenMP/symbol06.f90
+++ b/flang/test/Semantics/OpenMP/symbol06.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_symbols.py %s %flang_fc1 -fopenmp
! 2.15.3 Data-Sharing Attribute Clauses
diff --git a/flang/test/Semantics/OpenMP/symbol07.f90 b/flang/test/Semantics/OpenMP/symbol07.f90
index e2250f5c7908..ee6cd2a0df2e 100644
--- a/flang/test/Semantics/OpenMP/symbol07.f90
+++ b/flang/test/Semantics/OpenMP/symbol07.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_symbols.py %s %flang_fc1 -fopenmp
! Generic tests
diff --git a/flang/test/Semantics/OpenMP/symbol08.f90 b/flang/test/Semantics/OpenMP/symbol08.f90
index 3af85af74ee9..76db86cd54ca 100644
--- a/flang/test/Semantics/OpenMP/symbol08.f90
+++ b/flang/test/Semantics/OpenMP/symbol08.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_symbols.py %s %flang_fc1 -fopenmp
! 2.15.1.1 Predetermined rules for associated do-loops index variable
diff --git a/flang/test/Semantics/OpenMP/symbol09.f90 b/flang/test/Semantics/OpenMP/symbol09.f90
index e2250f5c7908..ee6cd2a0df2e 100644
--- a/flang/test/Semantics/OpenMP/symbol09.f90
+++ b/flang/test/Semantics/OpenMP/symbol09.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_symbols.py %s %flang_fc1 -fopenmp
! Generic tests
diff --git a/flang/test/Semantics/OpenMP/sync-critical01.f90 b/flang/test/Semantics/OpenMP/sync-critical01.f90
index b597eb17ea22..ef377ebc72f2 100644
--- a/flang/test/Semantics/OpenMP/sync-critical01.f90
+++ b/flang/test/Semantics/OpenMP/sync-critical01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 5.0
diff --git a/flang/test/Semantics/OpenMP/sync-critical02.f90 b/flang/test/Semantics/OpenMP/sync-critical02.f90
index 1fa9d6ad84f2..681aa7944c4f 100644
--- a/flang/test/Semantics/OpenMP/sync-critical02.f90
+++ b/flang/test/Semantics/OpenMP/sync-critical02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/taskgroup01.f90 b/flang/test/Semantics/OpenMP/taskgroup01.f90
index 9de1df91bf3b..98c9aabffa23 100644
--- a/flang/test/Semantics/OpenMP/taskgroup01.f90
+++ b/flang/test/Semantics/OpenMP/taskgroup01.f90
@@ -47,4 +47,4 @@ use omp_lib
!$omp end taskgroup
!$omp end task
!$omp end parallel
-end program
\ No newline at end of file
+end program
diff --git a/flang/test/Semantics/OpenMP/taskloop01.f90 b/flang/test/Semantics/OpenMP/taskloop01.f90
index 6bef58438151..2c5375949404 100644
--- a/flang/test/Semantics/OpenMP/taskloop01.f90
+++ b/flang/test/Semantics/OpenMP/taskloop01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.9.2 taskloop Construct
diff --git a/flang/test/Semantics/OpenMP/taskloop02.f90 b/flang/test/Semantics/OpenMP/taskloop02.f90
index 867ef8a9806d..275b079d38a1 100644
--- a/flang/test/Semantics/OpenMP/taskloop02.f90
+++ b/flang/test/Semantics/OpenMP/taskloop02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: not %flang -fsyntax-only -fopenmp %s 2>&1 | FileCheck %s
! OpenMP Version 4.5
! 2.9.2 taskloop Construct
diff --git a/flang/test/Semantics/OpenMP/taskloop03.f90 b/flang/test/Semantics/OpenMP/taskloop03.f90
deleted file mode 100644
index 7e2e426a3fe7..000000000000
--- a/flang/test/Semantics/OpenMP/taskloop03.f90
+++ /dev/null
@@ -1,25 +0,0 @@
-! RUN: %S/test_errors.sh %s %t %flang -fopenmp
-! XFAIL: *
-
-! OpenMP Version 4.5
-! 2.9.2 taskloop Construct
-! All loops associated with the taskloop construct must be perfectly nested,
-! there must be no intervening code or any OpenMP directive between
-! any two loops
-
-program omp_taskloop
- integer i, j
-
- !$omp taskloop private(j) grainsize(500) nogroup
- do i=1, 10000
- do j=1, i
- call loop_body(i, j)
- end do
- !ERROR: Loops associated with !$omp taskloop is not perfectly nested
- !$omp single
- print *, "omp single"
- !$omp end single
- end do
- !$omp end taskloop
-
-end program omp_taskloop
diff --git a/flang/test/Semantics/OpenMP/taskwait.f90 b/flang/test/Semantics/OpenMP/taskwait.f90
index e60051c9da8a..a3b15c7a1df0 100644
--- a/flang/test/Semantics/OpenMP/taskwait.f90
+++ b/flang/test/Semantics/OpenMP/taskwait.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
!$omp taskwait
diff --git a/flang/test/Semantics/OpenMP/threadprivate01.f90 b/flang/test/Semantics/OpenMP/threadprivate01.f90
index c2cf9ba99ab0..6597941ac3d5 100644
--- a/flang/test/Semantics/OpenMP/threadprivate01.f90
+++ b/flang/test/Semantics/OpenMP/threadprivate01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! REQUIRES: openmp_runtime
! RUN: %python %S/../test_errors.py %s %flang_fc1 %openmp_flags
diff --git a/flang/test/Semantics/OpenMP/threadprivate02.f90 b/flang/test/Semantics/OpenMP/threadprivate02.f90
index 7f6e8dcc8e8a..862d1e8a45c4 100644
--- a/flang/test/Semantics/OpenMP/threadprivate02.f90
+++ b/flang/test/Semantics/OpenMP/threadprivate02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 5.1
! Check OpenMP construct validity for the following directives:
diff --git a/flang/test/Semantics/OpenMP/threadprivate03.f90 b/flang/test/Semantics/OpenMP/threadprivate03.f90
index b466a8e05e9c..57d3b9209820 100644
--- a/flang/test/Semantics/OpenMP/threadprivate03.f90
+++ b/flang/test/Semantics/OpenMP/threadprivate03.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp -pedantic
! OpenMP Version 5.1
! Check OpenMP construct validity for the following directives:
diff --git a/flang/test/Semantics/OpenMP/threadprivate04.f90 b/flang/test/Semantics/OpenMP/threadprivate04.f90
index 3d8c7fb8de8f..8199dbaea166 100644
--- a/flang/test/Semantics/OpenMP/threadprivate04.f90
+++ b/flang/test/Semantics/OpenMP/threadprivate04.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 5.1
! Check OpenMP construct validity for the following directives:
diff --git a/flang/test/Semantics/OpenMP/threadprivate05.f90 b/flang/test/Semantics/OpenMP/threadprivate05.f90
index cdbf3701b70a..eecf9e781cf7 100644
--- a/flang/test/Semantics/OpenMP/threadprivate05.f90
+++ b/flang/test/Semantics/OpenMP/threadprivate05.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 5.1
! Check OpenMP construct validity for the following directives:
diff --git a/flang/test/Semantics/OpenMP/threadprivate06.f90 b/flang/test/Semantics/OpenMP/threadprivate06.f90
index f31c38f6f2b2..5537a8805e9f 100644
--- a/flang/test/Semantics/OpenMP/threadprivate06.f90
+++ b/flang/test/Semantics/OpenMP/threadprivate06.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 5.1
! Check OpenMP construct validity for the following directives:
diff --git a/flang/test/Semantics/OpenMP/threadprivate07.f90 b/flang/test/Semantics/OpenMP/threadprivate07.f90
index c9a006ca0e08..5302fdf4ab71 100644
--- a/flang/test/Semantics/OpenMP/threadprivate07.f90
+++ b/flang/test/Semantics/OpenMP/threadprivate07.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! Check Threadprivate Directive with local variable of a BLOCK construct.
diff --git a/flang/test/Semantics/OpenMP/use_device_addr.f90 b/flang/test/Semantics/OpenMP/use_device_addr.f90
index 93a7643b5eb4..dda00d510504 100644
--- a/flang/test/Semantics/OpenMP/use_device_addr.f90
+++ b/flang/test/Semantics/OpenMP/use_device_addr.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %flang_fc1 -fopenmp -fdebug-dump-symbols %s | FileCheck %s
! OpenMP Version 5.1
! 2.14.2 use_device_addr clause
diff --git a/flang/test/Semantics/OpenMP/use_device_addr1.f90 b/flang/test/Semantics/OpenMP/use_device_addr1.f90
index 867e324b68ad..c37e9a3a7e3e 100644
--- a/flang/test/Semantics/OpenMP/use_device_addr1.f90
+++ b/flang/test/Semantics/OpenMP/use_device_addr1.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 5.0
! 2.10.1 use_device_ptr clause
diff --git a/flang/test/Semantics/OpenMP/use_device_ptr.f90 b/flang/test/Semantics/OpenMP/use_device_ptr.f90
index 64b98cf67961..e9e7fbb6c1f5 100644
--- a/flang/test/Semantics/OpenMP/use_device_ptr.f90
+++ b/flang/test/Semantics/OpenMP/use_device_ptr.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %flang_fc1 -fopenmp -fdebug-dump-symbols %s | FileCheck %s
! OpenMP Version 5.0
! 2.10.1 use_device_ptr clause
diff --git a/flang/test/Semantics/OpenMP/use_device_ptr1.f90 b/flang/test/Semantics/OpenMP/use_device_ptr1.f90
index 176fb5f35a84..f705c50370da 100644
--- a/flang/test/Semantics/OpenMP/use_device_ptr1.f90
+++ b/flang/test/Semantics/OpenMP/use_device_ptr1.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang_fc1 -fopenmp
! OpenMP Version 5.0
! 2.10.1 use_device_ptr clause
diff --git a/flang/test/Semantics/OpenMP/workshare01.f90 b/flang/test/Semantics/OpenMP/workshare01.f90
index 9667a306061c..615c3408dc7a 100644
--- a/flang/test/Semantics/OpenMP/workshare01.f90
+++ b/flang/test/Semantics/OpenMP/workshare01.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.7.4 workshare Construct
diff --git a/flang/test/Semantics/OpenMP/workshare02.f90 b/flang/test/Semantics/OpenMP/workshare02.f90
index e099ecb9f1e6..b6faf197f1f2 100644
--- a/flang/test/Semantics/OpenMP/workshare02.f90
+++ b/flang/test/Semantics/OpenMP/workshare02.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.7.4 workshare Construct
diff --git a/flang/test/Semantics/OpenMP/workshare03.f90 b/flang/test/Semantics/OpenMP/workshare03.f90
index 09d46abf42ee..2aea0ccce3c7 100644
--- a/flang/test/Semantics/OpenMP/workshare03.f90
+++ b/flang/test/Semantics/OpenMP/workshare03.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.7.4 workshare Construct
diff --git a/flang/test/Semantics/OpenMP/workshare04.f90 b/flang/test/Semantics/OpenMP/workshare04.f90
index 0ec635e52d2b..e84459978e15 100644
--- a/flang/test/Semantics/OpenMP/workshare04.f90
+++ b/flang/test/Semantics/OpenMP/workshare04.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.7.4 workshare Construct
diff --git a/flang/test/Semantics/OpenMP/workshare05.f90 b/flang/test/Semantics/OpenMP/workshare05.f90
index b57053e092e6..30f3b988de91 100644
--- a/flang/test/Semantics/OpenMP/workshare05.f90
+++ b/flang/test/Semantics/OpenMP/workshare05.f90
@@ -1,3 +1,5 @@
+! UNSUPPORTED: system-windows
+! Marking as unsupported due to suspected long runtime on Windows
! RUN: %python %S/../test_errors.py %s %flang -fopenmp
! OpenMP Version 4.5
! 2.7.4 workshare Construct
diff --git a/libc/cmake/modules/LLVMLibCObjectRules.cmake b/libc/cmake/modules/LLVMLibCObjectRules.cmake
index 0649e9f7a767..134c5143d6d6 100644
--- a/libc/cmake/modules/LLVMLibCObjectRules.cmake
+++ b/libc/cmake/modules/LLVMLibCObjectRules.cmake
@@ -246,9 +246,6 @@ function(create_entrypoint_object fq_target_name)
if(NOT ADD_ENTRYPOINT_OBJ_SRCS)
message(FATAL_ERROR "`add_entrypoint_object` rule requires SRCS to be specified.")
endif()
- if(NOT ADD_ENTRYPOINT_OBJ_HDRS)
- message(FATAL_ERROR "`add_entrypoint_object` rule requires HDRS to be specified.")
- endif()
if(NOT ADD_ENTRYPOINT_OBJ_CXX_STANDARD)
set(ADD_ENTRYPOINT_OBJ_CXX_STANDARD ${CMAKE_CXX_STANDARD})
endif()
diff --git a/libc/config/baremetal/arm/entrypoints.txt b/libc/config/baremetal/arm/entrypoints.txt
index 4e3d1cb9f533..7fb82c60a1bb 100644
--- a/libc/config/baremetal/arm/entrypoints.txt
+++ b/libc/config/baremetal/arm/entrypoints.txt
@@ -183,6 +183,10 @@ set(TARGET_LIBC_ENTRYPOINTS
# time.h entrypoints
libc.src.time.difftime
+
+ # internal entrypoints
+ libc.startup.baremetal.init
+ libc.startup.baremetal.fini
)
set(TARGET_LIBM_ENTRYPOINTS
diff --git a/libc/config/baremetal/riscv/entrypoints.txt b/libc/config/baremetal/riscv/entrypoints.txt
index 7efd9bcd5b3c..b769b43f03a2 100644
--- a/libc/config/baremetal/riscv/entrypoints.txt
+++ b/libc/config/baremetal/riscv/entrypoints.txt
@@ -183,6 +183,10 @@ set(TARGET_LIBC_ENTRYPOINTS
# time.h entrypoints
libc.src.time.difftime
+
+ # internal entrypoints
+ libc.startup.baremetal.init
+ libc.startup.baremetal.fini
)
set(TARGET_LIBM_ENTRYPOINTS
diff --git a/libc/include/llvm-libc-macros/linux/CMakeLists.txt b/libc/include/llvm-libc-macros/linux/CMakeLists.txt
index a07803103eef..461b190c02ea 100644
--- a/libc/include/llvm-libc-macros/linux/CMakeLists.txt
+++ b/libc/include/llvm-libc-macros/linux/CMakeLists.txt
@@ -1,13 +1,7 @@
-add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/mips)
-add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/sparc)
-
add_header(
error_number_macros
HDR
error-number-macros.h
- DEPENDS
- .mips.error_number_macros
- .sparc.error_number_macros
)
add_header(
diff --git a/libc/include/llvm-libc-macros/linux/error-number-macros.h b/libc/include/llvm-libc-macros/linux/error-number-macros.h
index 4c8b3feb3dc3..1643a70918da 100644
--- a/libc/include/llvm-libc-macros/linux/error-number-macros.h
+++ b/libc/include/llvm-libc-macros/linux/error-number-macros.h
@@ -1,13 +1,6 @@
#ifndef LLVM_LIBC_MACROS_LINUX_ERROR_NUMBER_MACROS_H
#define LLVM_LIBC_MACROS_LINUX_ERROR_NUMBER_MACROS_H
-#if defined(__mips__)
-#include "mips/error-number-macros.h"
-
-#elif defined(__sparc__)
-#include "sparc/error-number-macros.h"
-
-#else
#ifndef ECANCELED
#define ECANCELED 125
#endif // ECANCELED
@@ -27,6 +20,5 @@
#ifndef EHWPOISON
#define EHWPOISON 133
#endif // EHWPOISON
-#endif
#endif // LLVM_LIBC_MACROS_LINUX_ERROR_NUMBER_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/mips/CMakeLists.txt b/libc/include/llvm-libc-macros/linux/mips/CMakeLists.txt
deleted file mode 100644
index eee4cfd19396..000000000000
--- a/libc/include/llvm-libc-macros/linux/mips/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-add_header(
- error_number_macros
- HDR
- error-number-macros.h
-)
diff --git a/libc/include/llvm-libc-macros/linux/mips/error-number-macros.h b/libc/include/llvm-libc-macros/linux/mips/error-number-macros.h
deleted file mode 100644
index af2a4243e3ce..000000000000
--- a/libc/include/llvm-libc-macros/linux/mips/error-number-macros.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef LLVM_LIBC_MACROS_LINUX_MIPS_ERROR_NUMBER_MACROS_H
-#define LLVM_LIBC_MACROS_LINUX_MIPS_ERROR_NUMBER_MACROS_H
-
-#ifndef ECANCELED
-#define ECANCELED 158
-#endif // ECANCELED
-
-#ifndef EOWNERDEAD
-#define EOWNERDEAD 165
-#endif // EOWNERDEAD
-
-#ifndef ENOTRECOVERABLE
-#define ENOTRECOVERABLE 166
-#endif // ENOTRECOVERABLE
-
-#ifndef ERFKILL
-#define ERFKILL 167
-#endif // ERFKILL
-
-#ifndef EHWPOISON
-#define EHWPOISON 168
-#endif // EHWPOISON
-
-#endif // LLVM_LIBC_MACROS_LINUX_MIPS_ERROR_NUMBER_MACROS_H
diff --git a/libc/include/llvm-libc-macros/linux/sparc/CMakeLists.txt b/libc/include/llvm-libc-macros/linux/sparc/CMakeLists.txt
deleted file mode 100644
index eee4cfd19396..000000000000
--- a/libc/include/llvm-libc-macros/linux/sparc/CMakeLists.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-add_header(
- error_number_macros
- HDR
- error-number-macros.h
-)
diff --git a/libc/include/llvm-libc-macros/linux/sparc/error-number-macros.h b/libc/include/llvm-libc-macros/linux/sparc/error-number-macros.h
deleted file mode 100644
index 76a1408bf760..000000000000
--- a/libc/include/llvm-libc-macros/linux/sparc/error-number-macros.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#ifndef LLVM_LIBC_MACROS_LINUX_SPARC_ERROR_NUMBER_MACROS_H
-#define LLVM_LIBC_MACROS_LINUX_SPARC_ERROR_NUMBER_MACROS_H
-
-#ifndef ECANCELED
-#define ECANCELED 127
-#endif // ECANCELED
-
-#ifndef EOWNERDEAD
-#define EOWNERDEAD 132
-#endif // EOWNERDEAD
-
-#ifndef ENOTRECOVERABLE
-#define ENOTRECOVERABLE 133
-#endif // ENOTRECOVERABLE
-
-#ifndef ERFKILL
-#define ERFKILL 134
-#endif // ERFKILL
-
-#ifndef EHWPOISON
-#define EHWPOISON 135
-#endif // EHWPOISON
-
-#endif // LLVM_LIBC_MACROS_LINUX_SPARC_ERROR_NUMBER_MACROS_H
diff --git a/libc/src/__support/threads/CMakeLists.txt b/libc/src/__support/threads/CMakeLists.txt
index 34412be4dfed..9ea0b59befe7 100644
--- a/libc/src/__support/threads/CMakeLists.txt
+++ b/libc/src/__support/threads/CMakeLists.txt
@@ -71,3 +71,12 @@ if(TARGET libc.src.__support.threads.${LIBC_TARGET_OS}.callonce)
.${LIBC_TARGET_OS}.callonce
)
endif()
+
+if(TARGET libc.src.__support.threads.${LIBC_TARGET_OS}.CndVar)
+ add_object_library(
+ CndVar
+ ALIAS
+ DEPENDS
+ .${LIBC_TARGET_OS}.CndVar
+ )
+endif()
diff --git a/libc/src/__support/threads/CndVar.h b/libc/src/__support/threads/CndVar.h
new file mode 100644
index 000000000000..baa2a686c57d
--- /dev/null
+++ b/libc/src/__support/threads/CndVar.h
@@ -0,0 +1,52 @@
+//===-- A platform independent abstraction layer for cond vars --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_LIBC___SUPPORT_SRC_THREADS_LINUX_CNDVAR_H
+#define LLVM_LIBC___SUPPORT_SRC_THREADS_LINUX_CNDVAR_H
+
+#include "src/__support/threads/linux/futex_utils.h" // Futex
+#include "src/__support/threads/mutex.h" // Mutex
+
+#include <stdint.h> // uint32_t
+
+namespace LIBC_NAMESPACE {
+
+struct CndVar {
+ enum CndWaiterStatus : uint32_t {
+ WS_Waiting = 0xE,
+ WS_Signalled = 0x5,
+ };
+
+ struct CndWaiter {
+ Futex futex_word = WS_Waiting;
+ CndWaiter *next = nullptr;
+ };
+
+ CndWaiter *waitq_front;
+ CndWaiter *waitq_back;
+ Mutex qmtx;
+
+ static int init(CndVar *cv) {
+ cv->waitq_front = cv->waitq_back = nullptr;
+ auto err = Mutex::init(&cv->qmtx, false, false, false);
+ return err == MutexError::NONE ? 0 : -1;
+ }
+
+ static void destroy(CndVar *cv) {
+ cv->waitq_front = cv->waitq_back = nullptr;
+ }
+
+ // Returns 0 on success, -1 on error.
+ int wait(Mutex *m);
+ void notify_one();
+ void broadcast();
+};
+
+} // namespace LIBC_NAMESPACE
+
+#endif // LLVM_LIBC___SUPPORT_SRC_THREADS_LINUX_CNDVAR_H
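Note on the relocated header: CndVar's surface stays small (init/destroy plus wait, notify_one, broadcast), and wait now reports 0/-1 instead of thrd_* values. A minimal sketch of how an internal caller could pair it with a Mutex, using only the member functions declared above; the wrapper functions and the flag are illustrative and not part of this patch:

// Illustrative sketch only; assumes a Mutex and a CndVar that were already
// set up with Mutex::init() and CndVar::init().
#include "src/__support/threads/CndVar.h"
#include "src/__support/threads/mutex.h"

namespace LIBC_NAMESPACE {

// Block until *flag becomes true. CndVar::wait releases |m| while sleeping
// and re-acquires it before returning.
void wait_for_flag(CndVar *cv, Mutex *m, bool *flag) {
  m->lock();
  while (!*flag)
    cv->wait(m);
  m->unlock();
}

// Publish the flag and wake one waiter; broadcast() would wake all of them.
void set_flag(CndVar *cv, Mutex *m, bool *flag) {
  m->lock();
  *flag = true;
  m->unlock();
  cv->notify_one();
}

} // namespace LIBC_NAMESPACE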
diff --git a/libc/src/__support/threads/linux/CMakeLists.txt b/libc/src/__support/threads/linux/CMakeLists.txt
index d3353f6b3ff8..39c4ad20201c 100644
--- a/libc/src/__support/threads/linux/CMakeLists.txt
+++ b/libc/src/__support/threads/linux/CMakeLists.txt
@@ -63,3 +63,16 @@ add_object_library(
DEPENDS
.futex_utils
)
+
+add_object_library(
+ CndVar
+ SRCS
+ CndVar.cpp
+ HDRS
+ ../CndVar.h
+ DEPENDS
+ libc.include.sys_syscall
+ libc.src.__support.OSUtil.osutil
+ libc.src.__support.threads.linux.futex_word_type
+ libc.src.__support.threads.mutex
+)
diff --git a/libc/src/__support/threads/linux/CndVar.cpp b/libc/src/__support/threads/linux/CndVar.cpp
new file mode 100644
index 000000000000..daf56bca1ed2
--- /dev/null
+++ b/libc/src/__support/threads/linux/CndVar.cpp
@@ -0,0 +1,103 @@
+//===-- Utility condition variable class ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "src/__support/threads/CndVar.h"
+#include "src/__support/OSUtil/syscall.h" // syscall_impl
+#include "src/__support/threads/linux/futex_word.h" // FutexWordType
+#include "src/__support/threads/mutex.h" // Mutex, MutexLock
+
+#include <sys/syscall.h> // For syscall numbers.
+
+namespace LIBC_NAMESPACE {
+
+int CndVar::wait(Mutex *m) {
+ // The goal is to perform "unlock |m| and wait" in an
+ // atomic operation. However, it is not possible to do it
+ // in the true sense so we do it in spirit. Before unlocking
+ // |m|, a new waiter object is added to the waiter queue with
+ // the waiter queue locked. Iff a signalling thread signals
+ // the waiter before the waiter actually starts waiting, the
+ // wait operation will not begin at all and the waiter immediately
+ // returns.
+
+ CndWaiter waiter;
+ {
+ MutexLock ml(&qmtx);
+ CndWaiter *old_back = nullptr;
+ if (waitq_front == nullptr) {
+ waitq_front = waitq_back = &waiter;
+ } else {
+ old_back = waitq_back;
+ waitq_back->next = &waiter;
+ waitq_back = &waiter;
+ }
+
+ if (m->unlock() != MutexError::NONE) {
+ // If we do not remove the queued up waiter before returning,
+ // then another thread can potentially signal a non-existing
+ // waiter. Note also that we do this with |qmtx| locked. This
+ // ensures that another thread will not signal the withdrawing
+ // waiter.
+ waitq_back = old_back;
+ if (waitq_back == nullptr)
+ waitq_front = nullptr;
+ else
+ waitq_back->next = nullptr;
+
+ return -1;
+ }
+ }
+
+ waiter.futex_word.wait(WS_Waiting, cpp::nullopt, true);
+
+ // At this point, if locking |m| fails, we can simply return as the
+ // queued up waiter would have been removed from the queue.
+ auto err = m->lock();
+ return err == MutexError::NONE ? 0 : -1;
+}
+
+void CndVar::notify_one() {
+ // We don't use an RAII locker in this method as we want to unlock
+ // |qmtx| and signal the waiter using a single FUTEX_WAKE_OP signal.
+ qmtx.lock();
+ if (waitq_front == nullptr)
+ qmtx.unlock();
+
+ CndWaiter *first = waitq_front;
+ waitq_front = waitq_front->next;
+ if (waitq_front == nullptr)
+ waitq_back = nullptr;
+
+ qmtx.futex_word = FutexWordType(Mutex::LockState::Free);
+
+ // this is a special WAKE_OP, so we use syscall directly
+ LIBC_NAMESPACE::syscall_impl<long>(
+ FUTEX_SYSCALL_ID, &qmtx.futex_word.val, FUTEX_WAKE_OP, 1, 1,
+ &first->futex_word.val,
+ FUTEX_OP(FUTEX_OP_SET, WS_Signalled, FUTEX_OP_CMP_EQ, WS_Waiting));
+}
+
+void CndVar::broadcast() {
+ MutexLock ml(&qmtx);
+ uint32_t dummy_futex_word;
+ CndWaiter *waiter = waitq_front;
+ waitq_front = waitq_back = nullptr;
+ while (waiter != nullptr) {
+ // FUTEX_WAKE_OP is used instead of just FUTEX_WAKE as it allows us to
+ // atomically update the waiter status to WS_Signalled before waking
+ // up the waiter. A dummy location is used for the other futex of
+ // FUTEX_WAKE_OP.
+ LIBC_NAMESPACE::syscall_impl<long>(
+ FUTEX_SYSCALL_ID, &dummy_futex_word, FUTEX_WAKE_OP, 1, 1,
+ &waiter->futex_word.val,
+ FUTEX_OP(FUTEX_OP_SET, WS_Signalled, FUTEX_OP_CMP_EQ, WS_Waiting));
+ waiter = waiter->next;
+ }
+}
+
+} // namespace LIBC_NAMESPACE
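For readers unfamiliar with FUTEX_WAKE_OP: notify_one() relies on the kernel performing a store to the waiter's futex word and up to two wakeups in one atomic step, which is why qmtx is released by hand rather than through an RAII guard. A stand-alone illustration of the same call shape using the plain syscall(2) wrapper follows; it is not part of this patch, and futex(2) has the full semantics:

// Illustration only. Mirrors the FUTEX_WAKE_OP issued by CndVar::notify_one():
// the kernel atomically stores `signalled` into *waiter_word (remembering the
// old value), wakes up to one thread blocked on queue_mutex_word, and, if the
// old value of *waiter_word equalled `expected_waiting`, also wakes up to one
// thread blocked on waiter_word.
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

long wake_op(uint32_t *queue_mutex_word, uint32_t *waiter_word,
             uint32_t expected_waiting, uint32_t signalled) {
  return syscall(SYS_futex, queue_mutex_word, FUTEX_WAKE_OP, /*val=*/1,
                 /*val2=*/1, waiter_word,
                 FUTEX_OP(FUTEX_OP_SET, signalled, FUTEX_OP_CMP_EQ,
                          expected_waiting));
}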
diff --git a/libc/src/setjmp/x86_64/CMakeLists.txt b/libc/src/setjmp/x86_64/CMakeLists.txt
index 9899c00e7c4a..ae84322a6540 100644
--- a/libc/src/setjmp/x86_64/CMakeLists.txt
+++ b/libc/src/setjmp/x86_64/CMakeLists.txt
@@ -9,6 +9,11 @@ add_entrypoint_object(
COMPILE_OPTIONS
-O3
-fno-omit-frame-pointer
+ # TODO: Remove once one of these lands:
+ # https://github.com/llvm/llvm-project/pull/87837
+ # https://github.com/llvm/llvm-project/pull/88054
+ # https://github.com/llvm/llvm-project/pull/88157
+ -ftrivial-auto-var-init=uninitialized
)
add_entrypoint_object(
diff --git a/libc/src/threads/linux/CMakeLists.txt b/libc/src/threads/linux/CMakeLists.txt
index 68b7106c2052..a5a02e47aab3 100644
--- a/libc/src/threads/linux/CMakeLists.txt
+++ b/libc/src/threads/linux/CMakeLists.txt
@@ -1,7 +1,6 @@
add_header_library(
threads_utils
HDRS
- CndVar.h
Futex.h
DEPENDS
libc.include.sys_syscall
@@ -20,8 +19,8 @@ add_entrypoint_object(
HDRS
../cnd_init.h
DEPENDS
- .threads_utils
libc.include.threads
+ libc.src.__support.threads.CndVar
)
add_entrypoint_object(
@@ -31,8 +30,8 @@ add_entrypoint_object(
HDRS
../cnd_destroy.h
DEPENDS
- .threads_utils
libc.include.threads
+ libc.src.__support.threads.CndVar
)
add_entrypoint_object(
@@ -42,9 +41,9 @@ add_entrypoint_object(
HDRS
../cnd_wait.h
DEPENDS
- .threads_utils
libc.include.threads
libc.src.__support.threads.mutex
+ libc.src.__support.threads.CndVar
)
add_entrypoint_object(
@@ -54,8 +53,8 @@ add_entrypoint_object(
HDRS
../cnd_signal.h
DEPENDS
- .threads_utils
libc.include.threads
+ libc.src.__support.threads.CndVar
)
add_entrypoint_object(
@@ -65,6 +64,6 @@ add_entrypoint_object(
HDRS
../cnd_broadcast.h
DEPENDS
- .threads_utils
libc.include.threads
+ libc.src.__support.threads.CndVar
)
diff --git a/libc/src/threads/linux/CndVar.h b/libc/src/threads/linux/CndVar.h
deleted file mode 100644
index c08ffa393856..000000000000
--- a/libc/src/threads/linux/CndVar.h
+++ /dev/null
@@ -1,148 +0,0 @@
-//===-- Utility condition variable class ------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIBC_SRC_THREADS_LINUX_CNDVAR_H
-#define LLVM_LIBC_SRC_THREADS_LINUX_CNDVAR_H
-
-#include "src/__support/CPP/atomic.h"
-#include "src/__support/CPP/mutex.h" // lock_guard
-#include "src/__support/CPP/optional.h"
-#include "src/__support/OSUtil/syscall.h" // For syscall functions.
-#include "src/__support/threads/linux/futex_utils.h"
-#include "src/__support/threads/mutex.h"
-
-#include <linux/futex.h> // For futex operations.
-#include <stdint.h>
-#include <sys/syscall.h> // For syscall numbers.
-#include <threads.h> // For values like thrd_success etc.
-
-namespace LIBC_NAMESPACE {
-
-struct CndVar {
- enum CndWaiterStatus : uint32_t {
- WS_Waiting = 0xE,
- WS_Signalled = 0x5,
- };
-
- struct CndWaiter {
- Futex futex_word = WS_Waiting;
- CndWaiter *next = nullptr;
- };
-
- CndWaiter *waitq_front;
- CndWaiter *waitq_back;
- Mutex qmtx;
-
- static int init(CndVar *cv) {
- cv->waitq_front = cv->waitq_back = nullptr;
- auto err = Mutex::init(&cv->qmtx, false, false, false);
- return err == MutexError::NONE ? thrd_success : thrd_error;
- }
-
- static void destroy(CndVar *cv) {
- cv->waitq_front = cv->waitq_back = nullptr;
- }
-
- int wait(Mutex *m) {
- // The goal is to perform "unlock |m| and wait" in an
- // atomic operation. However, it is not possible to do it
- // in the true sense so we do it in spirit. Before unlocking
- // |m|, a new waiter object is added to the waiter queue with
- // the waiter queue locked. Iff a signalling thread signals
- // the waiter before the waiter actually starts waiting, the
- // wait operation will not begin at all and the waiter immediately
- // returns.
-
- CndWaiter waiter;
- {
- cpp::lock_guard ml(qmtx);
- CndWaiter *old_back = nullptr;
- if (waitq_front == nullptr) {
- waitq_front = waitq_back = &waiter;
- } else {
- old_back = waitq_back;
- waitq_back->next = &waiter;
- waitq_back = &waiter;
- }
-
- if (m->unlock() != MutexError::NONE) {
- // If we do not remove the queued up waiter before returning,
- // then another thread can potentially signal a non-existing
- // waiter. Note also that we do this with |qmtx| locked. This
- // ensures that another thread will not signal the withdrawing
- // waiter.
- waitq_back = old_back;
- if (waitq_back == nullptr)
- waitq_front = nullptr;
- else
- waitq_back->next = nullptr;
-
- return thrd_error;
- }
- }
-
- waiter.futex_word.wait(WS_Waiting, cpp::nullopt, true);
-
- // At this point, if locking |m| fails, we can simply return as the
- // queued up waiter would have been removed from the queue.
- auto err = m->lock();
- return err == MutexError::NONE ? thrd_success : thrd_error;
- }
-
- int notify_one() {
- // We don't use an RAII locker in this method as we want to unlock
- // |qmtx| and signal the waiter using a single FUTEX_WAKE_OP signal.
- qmtx.lock();
- if (waitq_front == nullptr) {
- qmtx.unlock();
- return thrd_success;
- }
-
- CndWaiter *first = waitq_front;
- waitq_front = waitq_front->next;
- if (waitq_front == nullptr)
- waitq_back = nullptr;
-
- qmtx.futex_word = FutexWordType(Mutex::LockState::Free);
-
- // this is a special WAKE_OP, so we use syscall directly
- LIBC_NAMESPACE::syscall_impl<long>(
- FUTEX_SYSCALL_ID, &qmtx.futex_word.val, FUTEX_WAKE_OP, 1, 1,
- &first->futex_word.val,
- FUTEX_OP(FUTEX_OP_SET, WS_Signalled, FUTEX_OP_CMP_EQ, WS_Waiting));
- return thrd_success;
- }
-
- int broadcast() {
- cpp::lock_guard ml(qmtx);
- uint32_t dummy_futex_word;
- CndWaiter *waiter = waitq_front;
- waitq_front = waitq_back = nullptr;
- while (waiter != nullptr) {
- // FUTEX_WAKE_OP is used instead of just FUTEX_WAKE as it allows us to
- // atomically update the waiter status to WS_Signalled before waking
- // up the waiter. A dummy location is used for the other futex of
- // FUTEX_WAKE_OP.
- LIBC_NAMESPACE::syscall_impl<long>(
- FUTEX_SYSCALL_ID, &dummy_futex_word, FUTEX_WAKE_OP, 1, 1,
- &waiter->futex_word.val,
- FUTEX_OP(FUTEX_OP_SET, WS_Signalled, FUTEX_OP_CMP_EQ, WS_Waiting));
- waiter = waiter->next;
- }
- return thrd_success;
- }
-};
-
-static_assert(sizeof(CndVar) == sizeof(cnd_t),
- "Mismatch in the size of the "
- "internal representation of condition variable and the public "
- "cnd_t type.");
-
-} // namespace LIBC_NAMESPACE
-
-#endif // LLVM_LIBC_SRC_THREADS_LINUX_CNDVAR_H
diff --git a/libc/src/threads/linux/cnd_broadcast.cpp b/libc/src/threads/linux/cnd_broadcast.cpp
index 180ac6d68ee8..a56aaa21ee12 100644
--- a/libc/src/threads/linux/cnd_broadcast.cpp
+++ b/libc/src/threads/linux/cnd_broadcast.cpp
@@ -6,16 +6,21 @@
//
//===----------------------------------------------------------------------===//
-#include "CndVar.h"
-
#include "src/threads/cnd_broadcast.h"
#include "src/__support/common.h"
+#include "src/__support/threads/CndVar.h"
+
+// TODO: https://github.com/llvm/llvm-project/issues/92968
+#include <threads.h> // cnd_t, thrd_error, thrd_success
namespace LIBC_NAMESPACE {
+static_assert(sizeof(CndVar) == sizeof(cnd_t));
+
LLVM_LIBC_FUNCTION(int, cnd_broadcast, (cnd_t * cond)) {
CndVar *cndvar = reinterpret_cast<CndVar *>(cond);
- return cndvar->broadcast();
+ cndvar->broadcast();
+ return thrd_success;
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/threads/linux/cnd_destroy.cpp b/libc/src/threads/linux/cnd_destroy.cpp
index 08eb3a1057b1..2b03b18c48e4 100644
--- a/libc/src/threads/linux/cnd_destroy.cpp
+++ b/libc/src/threads/linux/cnd_destroy.cpp
@@ -6,13 +6,16 @@
//
//===----------------------------------------------------------------------===//
-#include "CndVar.h"
-
#include "src/threads/cnd_destroy.h"
#include "src/__support/common.h"
+#include "src/__support/threads/CndVar.h"
+
+#include <threads.h> // cnd_t
namespace LIBC_NAMESPACE {
+static_assert(sizeof(CndVar) == sizeof(cnd_t));
+
LLVM_LIBC_FUNCTION(void, cnd_destroy, (cnd_t * cond)) {
CndVar *cndvar = reinterpret_cast<CndVar *>(cond);
CndVar::destroy(cndvar);
diff --git a/libc/src/threads/linux/cnd_init.cpp b/libc/src/threads/linux/cnd_init.cpp
index 5e3f360b1d2b..d3d2c8a57d82 100644
--- a/libc/src/threads/linux/cnd_init.cpp
+++ b/libc/src/threads/linux/cnd_init.cpp
@@ -6,16 +6,19 @@
//
//===----------------------------------------------------------------------===//
-#include "CndVar.h"
-
#include "src/threads/cnd_init.h"
#include "src/__support/common.h"
+#include "src/__support/threads/CndVar.h"
+
+#include <threads.h> // cnd_t, thrd_error, thrd_success
namespace LIBC_NAMESPACE {
+static_assert(sizeof(CndVar) == sizeof(cnd_t));
+
LLVM_LIBC_FUNCTION(int, cnd_init, (cnd_t * cond)) {
CndVar *cndvar = reinterpret_cast<CndVar *>(cond);
- return CndVar::init(cndvar);
+ return CndVar::init(cndvar) ? thrd_error : thrd_success;
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/threads/linux/cnd_signal.cpp b/libc/src/threads/linux/cnd_signal.cpp
index dba01abdefbc..f144013e0882 100644
--- a/libc/src/threads/linux/cnd_signal.cpp
+++ b/libc/src/threads/linux/cnd_signal.cpp
@@ -6,16 +6,20 @@
//
//===----------------------------------------------------------------------===//
-#include "CndVar.h"
-
#include "src/threads/cnd_signal.h"
#include "src/__support/common.h"
+#include "src/__support/threads/CndVar.h"
+
+#include <threads.h> // cnd_t, thrd_error, thrd_success
namespace LIBC_NAMESPACE {
+static_assert(sizeof(CndVar) == sizeof(cnd_t));
+
LLVM_LIBC_FUNCTION(int, cnd_signal, (cnd_t * cond)) {
CndVar *cndvar = reinterpret_cast<CndVar *>(cond);
- return cndvar->notify_one();
+ cndvar->notify_one();
+ return thrd_success;
}
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/threads/linux/cnd_wait.cpp b/libc/src/threads/linux/cnd_wait.cpp
index db3d7f1436eb..97cade3f231d 100644
--- a/libc/src/threads/linux/cnd_wait.cpp
+++ b/libc/src/threads/linux/cnd_wait.cpp
@@ -6,18 +6,21 @@
//
//===----------------------------------------------------------------------===//
-#include "CndVar.h"
-
+#include "src/threads/cnd_wait.h"
#include "src/__support/common.h"
+#include "src/__support/threads/CndVar.h"
#include "src/__support/threads/mutex.h"
-#include "src/threads/cnd_wait.h"
+
+#include <threads.h> // cnd_t, mtx_t, thrd_error, thrd_success
namespace LIBC_NAMESPACE {
+static_assert(sizeof(CndVar) == sizeof(cnd_t));
+
LLVM_LIBC_FUNCTION(int, cnd_wait, (cnd_t * cond, mtx_t *mtx)) {
CndVar *cndvar = reinterpret_cast<CndVar *>(cond);
Mutex *mutex = reinterpret_cast<Mutex *>(mtx);
- return cndvar->wait(mutex);
+ return cndvar->wait(mutex) ? thrd_error : thrd_success;
}
} // namespace LIBC_NAMESPACE
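Taken together, the cnd_* entrypoints keep the public C11 behaviour unchanged and only translate CndVar's 0/-1 results into thrd_success/thrd_error. For reference, the standard <threads.h> pattern they serve (plain user code, nothing libc-internal):

// Standard C11 condition-variable usage exercising the entrypoints above.
#include <stdio.h>
#include <threads.h>

static mtx_t lock;
static cnd_t ready;
static int data_available = 0;

static int worker(void *arg) {
  (void)arg;
  mtx_lock(&lock);
  data_available = 1;
  mtx_unlock(&lock);
  cnd_signal(&ready);          // ends up in CndVar::notify_one()
  return 0;
}

int main(void) {
  mtx_init(&lock, mtx_plain);
  cnd_init(&ready);            // ends up in CndVar::init()
  thrd_t t;
  thrd_create(&t, worker, NULL);
  mtx_lock(&lock);
  while (!data_available)
    cnd_wait(&ready, &lock);   // ends up in CndVar::wait()
  mtx_unlock(&lock);
  thrd_join(t, NULL);
  cnd_destroy(&ready);
  mtx_destroy(&lock);
  puts("done");
  return 0;
}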
diff --git a/libc/src/time/gpu/time_utils.cpp b/libc/src/time/gpu/time_utils.cpp
index 67fe5b4861ac..1a674b2fdca2 100644
--- a/libc/src/time/gpu/time_utils.cpp
+++ b/libc/src/time/gpu/time_utils.cpp
@@ -15,8 +15,7 @@ namespace LIBC_NAMESPACE {
// insufficient.
// TODO: Once we have another use-case for this we should put it in a common
// device environment struct.
-extern "C" [[gnu::visibility("protected")]] uint64_t __llvm_libc_clock_freq =
- clock_freq;
+gpu::Constant<uint64_t> __llvm_libc_clock_freq = clock_freq;
#endif
} // namespace LIBC_NAMESPACE
diff --git a/libc/src/time/gpu/time_utils.h b/libc/src/time/gpu/time_utils.h
index da713886b643..77eeb896f6c3 100644
--- a/libc/src/time/gpu/time_utils.h
+++ b/libc/src/time/gpu/time_utils.h
@@ -23,7 +23,10 @@ constexpr uint64_t clock_freq = 100000000UL;
// We provide an externally visible symbol such that the runtime can set
// this to the correct value.
-extern "C" [[gnu::visibility("protected")]] uint64_t __llvm_libc_clock_freq;
+extern "C" {
+[[gnu::visibility("protected")]]
+extern gpu::Constant<uint64_t> __llvm_libc_clock_freq;
+}
#define GPU_CLOCKS_PER_SEC static_cast<clock_t>(__llvm_libc_clock_freq)
#elif defined(LIBC_TARGET_ARCH_IS_NVPTX)
diff --git a/libc/startup/baremetal/CMakeLists.txt b/libc/startup/baremetal/CMakeLists.txt
new file mode 100644
index 000000000000..4faced93fabe
--- /dev/null
+++ b/libc/startup/baremetal/CMakeLists.txt
@@ -0,0 +1,11 @@
+add_entrypoint_object(
+ init
+ SRCS
+ init.cpp
+)
+
+add_entrypoint_object(
+ fini
+ SRCS
+ fini.cpp
+)
diff --git a/libc/startup/baremetal/fini.cpp b/libc/startup/baremetal/fini.cpp
new file mode 100644
index 000000000000..84997fb4fa1d
--- /dev/null
+++ b/libc/startup/baremetal/fini.cpp
@@ -0,0 +1,27 @@
+//===-- Implementation file of __libc_fini_array --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <stddef.h>
+#include <stdint.h>
+
+extern "C" {
+extern uintptr_t __fini_array_start[];
+extern uintptr_t __fini_array_end[];
+}
+
+namespace LIBC_NAMESPACE {
+
+using FiniCallback = void(void);
+
+extern "C" void __libc_fini_array(void) {
+ size_t fini_array_size = __fini_array_end - __fini_array_start;
+ for (size_t i = fini_array_size; i > 0; --i)
+ reinterpret_cast<FiniCallback *>(__fini_array_start[i - 1])();
+}
+
+} // namespace LIBC_NAMESPACE
diff --git a/libc/startup/baremetal/init.cpp b/libc/startup/baremetal/init.cpp
new file mode 100644
index 000000000000..08dff74f0519
--- /dev/null
+++ b/libc/startup/baremetal/init.cpp
@@ -0,0 +1,32 @@
+//===-- Implementation file of __libc_init_array --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <stddef.h>
+#include <stdint.h>
+
+extern "C" {
+extern uintptr_t __preinit_array_start[];
+extern uintptr_t __preinit_array_end[];
+extern uintptr_t __init_array_start[];
+extern uintptr_t __init_array_end[];
+}
+
+namespace LIBC_NAMESPACE {
+
+using InitCallback = void(void);
+
+extern "C" void __libc_init_array(void) {
+ size_t preinit_array_size = __preinit_array_end - __preinit_array_start;
+ for (size_t i = 0; i < preinit_array_size; ++i)
+ reinterpret_cast<InitCallback *>(__preinit_array_start[i])();
+ size_t init_array_size = __init_array_end - __init_array_start;
+ for (size_t i = 0; i < init_array_size; ++i)
+ reinterpret_cast<InitCallback *>(__init_array_start[i])();
+}
+
+} // namespace LIBC_NAMESPACE
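Both startup entrypoints assume the linker provides the __preinit_array/__init_array/__fini_array boundary symbols. A hypothetical crt0-style caller is sketched below; _start and the board-specific setup are placeholders and not defined by this patch:

// Hypothetical baremetal entry point (illustration only). Stack setup, .bss
// clearing and .data copying are assumed to have happened before _start runs
// the constructors via __libc_init_array.
extern "C" void __libc_init_array(void);
extern "C" void __libc_fini_array(void);
extern "C" int main(void);

extern "C" [[noreturn]] void _start(void) {
  __libc_init_array();  // preinit_array, then init_array, in forward order
  main();
  __libc_fini_array();  // fini_array in reverse order, mirroring fini.cpp
  for (;;) {            // nothing to return to on bare metal
  }
}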
diff --git a/libc/test/integration/scudo/CMakeLists.txt b/libc/test/integration/scudo/CMakeLists.txt
index 8a085b618044..a5f7e3b63d24 100644
--- a/libc/test/integration/scudo/CMakeLists.txt
+++ b/libc/test/integration/scudo/CMakeLists.txt
@@ -9,6 +9,7 @@ endif()
# test will have to link to the LLVM libc startup system. LLVM libc's startup
# system is not complete enough to allow this. It is also desirable to
# keep the dependencies as minimal as possible.
+
add_entrypoint_library(
libc_for_scudo_integration_test
DEPENDS
@@ -17,6 +18,9 @@ add_entrypoint_library(
libc.src.stdlib.realloc
libc.src.stdlib.aligned_alloc
libc.src.stdlib.free
+ libc.src.errno.errno
+ libc.src.unistd.__llvm_libc_syscall
+ libc.src.sched.__sched_getcpucount
)
add_executable(
diff --git a/libcxx/docs/ReleaseNotes/19.rst b/libcxx/docs/ReleaseNotes/19.rst
index 83fcd40bb80c..0bc343acd281 100644
--- a/libcxx/docs/ReleaseNotes/19.rst
+++ b/libcxx/docs/ReleaseNotes/19.rst
@@ -53,6 +53,7 @@ Implemented Papers
- P2387R3 - Pipe support for user-defined range adaptors
- P2713R1 - Escaping improvements in ``std::format``
- P2231R1 - Missing ``constexpr`` in ``std::optional`` and ``std::variant``
+- P0019R8 - ``std::atomic_ref``
Improvements and New Features
-----------------------------
diff --git a/libcxx/docs/Status/Cxx20Papers.csv b/libcxx/docs/Status/Cxx20Papers.csv
index 955aa5f614af..6598cd18358f 100644
--- a/libcxx/docs/Status/Cxx20Papers.csv
+++ b/libcxx/docs/Status/Cxx20Papers.csv
@@ -26,7 +26,7 @@
"`P0905R1 <https://wg21.link/P0905R1>`__","CWG","Symmetry for spaceship","Jacksonville","|Complete|","7.0","|spaceship|"
"`P0966R1 <https://wg21.link/P0966R1>`__","LWG","``string::reserve``\ Should Not Shrink","Jacksonville","|Complete| [#note-P0966]_","12.0"
"","","","","","",""
-"`P0019R8 <https://wg21.link/P0019R8>`__","LWG","Atomic Ref","Rapperswil","",""
+"`P0019R8 <https://wg21.link/P0019R8>`__","LWG","Atomic Ref","Rapperswil","|Complete|","19.0"
"`P0458R2 <https://wg21.link/P0458R2>`__","LWG","Checking for Existence of an Element in Associative Containers","Rapperswil","|Complete|","13.0"
"`P0475R1 <https://wg21.link/P0475R1>`__","LWG","LWG 2511: guaranteed copy elision for piecewise construction","Rapperswil","|Complete|",""
"`P0476R2 <https://wg21.link/P0476R2>`__","LWG","Bit-casting object representations","Rapperswil","|Complete|","14.0"
@@ -125,7 +125,7 @@
"`P1612R1 <https://wg21.link/P1612R1>`__","LWG","Relocate Endian's Specification","Cologne","|Complete|","10.0"
"`P1614R2 <https://wg21.link/P1614R2>`__","LWG","The Mothership has Landed","Cologne","|In Progress|",""
"`P1638R1 <https://wg21.link/P1638R1>`__","LWG","basic_istream_view::iterator should not be copyable","Cologne","|Complete|","16.0","|ranges|"
-"`P1643R1 <https://wg21.link/P1643R1>`__","LWG","Add wait/notify to atomic_ref","Cologne","",""
+"`P1643R1 <https://wg21.link/P1643R1>`__","LWG","Add wait/notify to atomic_ref","Cologne","|Complete|","19.0"
"`P1644R0 <https://wg21.link/P1644R0>`__","LWG","Add wait/notify to atomic<shared_ptr>","Cologne","",""
"`P1650R0 <https://wg21.link/P1650R0>`__","LWG","Output std::chrono::days with 'd' suffix","Cologne","|Complete|","16.0"
"`P1651R0 <https://wg21.link/P1651R0>`__","LWG","bind_front should not unwrap reference_wrapper","Cologne","|Complete|","13.0"
diff --git a/libcxx/docs/Status/Cxx23Issues.csv b/libcxx/docs/Status/Cxx23Issues.csv
index d421feef8db9..cc601b3cd3c9 100644
--- a/libcxx/docs/Status/Cxx23Issues.csv
+++ b/libcxx/docs/Status/Cxx23Issues.csv
@@ -98,7 +98,7 @@
`3555 <https://wg21.link/LWG3555>`__,"``{transform,elements}_view::iterator::iterator_concept`` should consider const-qualification of the underlying range","June 2021","","","|ranges|"
"","","","","",""
`2191 <https://wg21.link/LWG2191>`__,"Incorrect specification of ``match_results(match_results&&)``","October 2021","|Nothing To Do|",""
-`2381 <https://wg21.link/LWG2381>`__,"Inconsistency in parsing floating point numbers","October 2021","",""
+`2381 <https://wg21.link/LWG2381>`__,"Inconsistency in parsing floating point numbers","October 2021","|Complete|","19.0"
`2762 <https://wg21.link/LWG2762>`__,"``unique_ptr operator*()`` should be ``noexcept``","October 2021","",""
`3121 <https://wg21.link/LWG3121>`__,"``tuple`` constructor constraints for ``UTypes&&...`` overloads","October 2021","",""
`3123 <https://wg21.link/LWG3123>`__,"``duration`` constructor from representation shouldn't be effectively non-throwing","October 2021","","","|chrono|"
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index 01e9c247560c..954e0c04ec85 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -224,6 +224,7 @@ set(files
__atomic/atomic_flag.h
__atomic/atomic_init.h
__atomic/atomic_lock_free.h
+ __atomic/atomic_ref.h
__atomic/atomic_sync.h
__atomic/check_memory_order.h
__atomic/contention_t.h
@@ -232,6 +233,7 @@ set(files
__atomic/is_always_lock_free.h
__atomic/kill_dependency.h
__atomic/memory_order.h
+ __atomic/to_gcc_order.h
__availability
__bit/bit_cast.h
__bit/bit_ceil.h
diff --git a/libcxx/include/__atomic/atomic_ref.h b/libcxx/include/__atomic/atomic_ref.h
new file mode 100644
index 000000000000..156f1961151c
--- /dev/null
+++ b/libcxx/include/__atomic/atomic_ref.h
@@ -0,0 +1,360 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+// Kokkos v. 4.0
+// Copyright (2022) National Technology & Engineering
+// Solutions of Sandia, LLC (NTESS).
+//
+// Under the terms of Contract DE-NA0003525 with NTESS,
+// the U.S. Government retains certain rights in this software.
+//
+//===---------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ATOMIC_ATOMIC_REF_H
+#define _LIBCPP___ATOMIC_ATOMIC_REF_H
+
+#include <__assert>
+#include <__atomic/atomic_sync.h>
+#include <__atomic/check_memory_order.h>
+#include <__atomic/to_gcc_order.h>
+#include <__concepts/arithmetic.h>
+#include <__concepts/same_as.h>
+#include <__config>
+#include <__memory/addressof.h>
+#include <__type_traits/has_unique_object_representation.h>
+#include <__type_traits/is_trivially_copyable.h>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_PUSH_MACROS
+#include <__undef_macros>
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER >= 20
+
+template <class _Tp>
+struct __atomic_ref_base {
+protected:
+ _Tp* __ptr_;
+
+ _LIBCPP_HIDE_FROM_ABI __atomic_ref_base(_Tp& __obj) : __ptr_(std::addressof(__obj)) {}
+
+private:
+ _LIBCPP_HIDE_FROM_ABI static _Tp* __clear_padding(_Tp& __val) noexcept {
+ _Tp* __ptr = std::addressof(__val);
+# if __has_builtin(__builtin_clear_padding)
+ __builtin_clear_padding(__ptr);
+# endif
+ return __ptr;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI static bool __compare_exchange(
+ _Tp* __ptr, _Tp* __expected, _Tp* __desired, bool __is_weak, int __success, int __failure) noexcept {
+ if constexpr (
+# if __has_builtin(__builtin_clear_padding)
+ has_unique_object_representations_v<_Tp> || floating_point<_Tp>
+# else
+ true // NOLINT(readability-simplify-boolean-expr)
+# endif
+ ) {
+ return __atomic_compare_exchange(__ptr, __expected, __desired, __is_weak, __success, __failure);
+ } else { // _Tp has padding bits and __builtin_clear_padding is available
+ __clear_padding(*__desired);
+ _Tp __copy = *__expected;
+ __clear_padding(__copy);
+ // The algorithm we use here is basically to perform `__atomic_compare_exchange` on the
+ // values until it has either succeeded, or failed because the value representation of the
+ // objects involved was different. This is why we loop around __atomic_compare_exchange:
+ // we basically loop until its failure is caused by the value representation of the objects
+ // being different, not only their object representation.
+ while (true) {
+ _Tp __prev = __copy;
+ if (__atomic_compare_exchange(__ptr, std::addressof(__copy), __desired, __is_weak, __success, __failure)) {
+ return true;
+ }
+ _Tp __curr = __copy;
+ if (std::memcmp(__clear_padding(__prev), __clear_padding(__curr), sizeof(_Tp)) != 0) {
+ // Value representation without padding bits do not compare equal ->
+ // write the current content of *ptr into *expected
+ std::memcpy(__expected, std::addressof(__copy), sizeof(_Tp));
+ return false;
+ }
+ }
+ }
+ }
+
+ friend struct __atomic_waitable_traits<__atomic_ref_base<_Tp>>;
+
+public:
+ using value_type = _Tp;
+
+ static constexpr size_t required_alignment = alignof(_Tp);
+
+ // The __atomic_always_lock_free builtin takes into account the alignment of the pointer if provided,
+ // so we create a fake pointer with a suitable alignment when querying it. Note that we are guaranteed
+ // that the pointer is going to be aligned properly at runtime because that is a (checked) precondition
+ // of atomic_ref's constructor.
+ static constexpr bool is_always_lock_free =
+ __atomic_always_lock_free(sizeof(_Tp), reinterpret_cast<void*>(-required_alignment));
+
+ _LIBCPP_HIDE_FROM_ABI bool is_lock_free() const noexcept { return __atomic_is_lock_free(sizeof(_Tp), __ptr_); }
+
+ _LIBCPP_HIDE_FROM_ABI void store(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_CHECK_STORE_MEMORY_ORDER(__order) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __order == memory_order::relaxed || __order == memory_order::release || __order == memory_order::seq_cst,
+ "atomic_ref: memory order argument to atomic store operation is invalid");
+ __atomic_store(__ptr_, __clear_padding(__desired), std::__to_gcc_order(__order));
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept {
+ store(__desired);
+ return __desired;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp load(memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__order) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
+ __order == memory_order::seq_cst,
+ "atomic_ref: memory order argument to atomic load operation is invalid");
+ alignas(_Tp) byte __mem[sizeof(_Tp)];
+ auto* __ret = reinterpret_cast<_Tp*>(__mem);
+ __atomic_load(__ptr_, __ret, std::__to_gcc_order(__order));
+ return *__ret;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI operator _Tp() const noexcept { return load(); }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp exchange(_Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+ alignas(_Tp) byte __mem[sizeof(_Tp)];
+ auto* __ret = reinterpret_cast<_Tp*>(__mem);
+ __atomic_exchange(__ptr_, __clear_padding(__desired), __ret, std::__to_gcc_order(__order));
+ return *__ret;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __failure == memory_order::relaxed || __failure == memory_order::consume ||
+ __failure == memory_order::acquire || __failure == memory_order::seq_cst,
+ "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid");
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ true,
+ std::__to_gcc_order(__success),
+ std::__to_gcc_order(__failure));
+ }
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __success, memory_order __failure) const noexcept
+ _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__success, __failure) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __failure == memory_order::relaxed || __failure == memory_order::consume ||
+ __failure == memory_order::acquire || __failure == memory_order::seq_cst,
+ "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid");
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ false,
+ std::__to_gcc_order(__success),
+ std::__to_gcc_order(__failure));
+ }
+
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_weak(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ true,
+ std::__to_gcc_order(__order),
+ std::__to_gcc_failure_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI bool
+ compare_exchange_strong(_Tp& __expected, _Tp __desired, memory_order __order = memory_order::seq_cst) const noexcept {
+ return __compare_exchange(
+ __ptr_,
+ std::addressof(__expected),
+ std::addressof(__desired),
+ false,
+ std::__to_gcc_order(__order),
+ std::__to_gcc_failure_order(__order));
+ }
+
+ _LIBCPP_HIDE_FROM_ABI void wait(_Tp __old, memory_order __order = memory_order::seq_cst) const noexcept
+ _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__order) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ __order == memory_order::relaxed || __order == memory_order::consume || __order == memory_order::acquire ||
+ __order == memory_order::seq_cst,
+ "atomic_ref: memory order argument to atomic wait operation is invalid");
+ std::__atomic_wait(*this, __old, __order);
+ }
+ _LIBCPP_HIDE_FROM_ABI void notify_one() const noexcept { std::__atomic_notify_one(*this); }
+ _LIBCPP_HIDE_FROM_ABI void notify_all() const noexcept { std::__atomic_notify_all(*this); }
+};
+
+template <class _Tp>
+struct __atomic_waitable_traits<__atomic_ref_base<_Tp>> {
+ static _LIBCPP_HIDE_FROM_ABI _Tp __atomic_load(const __atomic_ref_base<_Tp>& __a, memory_order __order) {
+ return __a.load(__order);
+ }
+ static _LIBCPP_HIDE_FROM_ABI const _Tp* __atomic_contention_address(const __atomic_ref_base<_Tp>& __a) {
+ return __a.__ptr_;
+ }
+};
+
+template <class _Tp>
+struct atomic_ref : public __atomic_ref_base<_Tp> {
+ static_assert(is_trivially_copyable_v<_Tp>, "std::atomic_ref<T> requires that 'T' be a trivially copyable type");
+
+ using __base = __atomic_ref_base<_Tp>;
+
+ _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
+ "atomic_ref ctor: referenced object must be aligned to required_alignment");
+ }
+
+ _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+};
+
+template <class _Tp>
+ requires(std::integral<_Tp> && !std::same_as<bool, _Tp>)
+struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
+ using __base = __atomic_ref_base<_Tp>;
+
+ using difference_type = __base::value_type;
+
+ _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
+ "atomic_ref ctor: referenced object must be aligned to required_alignment");
+ }
+
+ _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_add(this->__ptr_, __arg, std::__to_gcc_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_sub(this->__ptr_, __arg, std::__to_gcc_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_and(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_and(this->__ptr_, __arg, std::__to_gcc_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_or(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_or(this->__ptr_, __arg, std::__to_gcc_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_xor(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_xor(this->__ptr_, __arg, std::__to_gcc_order(__order));
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator++(int) const noexcept { return fetch_add(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator--(int) const noexcept { return fetch_sub(_Tp(1)); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator++() const noexcept { return fetch_add(_Tp(1)) + _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator--() const noexcept { return fetch_sub(_Tp(1)) - _Tp(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator&=(_Tp __arg) const noexcept { return fetch_and(__arg) & __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator|=(_Tp __arg) const noexcept { return fetch_or(__arg) | __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator^=(_Tp __arg) const noexcept { return fetch_xor(__arg) ^ __arg; }
+};
+
+template <class _Tp>
+ requires std::floating_point<_Tp>
+struct atomic_ref<_Tp> : public __atomic_ref_base<_Tp> {
+ using __base = __atomic_ref_base<_Tp>;
+
+ using difference_type = __base::value_type;
+
+ _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp& __obj) : __base(__obj) {
+ _LIBCPP_ASSERT_ARGUMENT_WITHIN_DOMAIN(
+ reinterpret_cast<uintptr_t>(std::addressof(__obj)) % __base::required_alignment == 0,
+ "atomic_ref ctor: referenced object must be aligned to required_alignment");
+ }
+
+ _LIBCPP_HIDE_FROM_ABI atomic_ref(const atomic_ref&) noexcept = default;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator=(_Tp __desired) const noexcept { return __base::operator=(__desired); }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_add(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _Tp __old = this->load(memory_order_relaxed);
+ _Tp __new = __old + __arg;
+ while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
+ __new = __old + __arg;
+ }
+ return __old;
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp fetch_sub(_Tp __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ _Tp __old = this->load(memory_order_relaxed);
+ _Tp __new = __old - __arg;
+ while (!this->compare_exchange_weak(__old, __new, __order, memory_order_relaxed)) {
+ __new = __old - __arg;
+ }
+ return __old;
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp operator+=(_Tp __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp operator-=(_Tp __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+};
+
+template <class _Tp>
+struct atomic_ref<_Tp*> : public __atomic_ref_base<_Tp*> {
+ using __base = __atomic_ref_base<_Tp*>;
+
+ using difference_type = ptrdiff_t;
+
+ _LIBCPP_HIDE_FROM_ABI explicit atomic_ref(_Tp*& __ptr) : __base(__ptr) {}
+
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator=(_Tp* __desired) const noexcept { return __base::operator=(__desired); }
+
+ atomic_ref& operator=(const atomic_ref&) = delete;
+
+ _LIBCPP_HIDE_FROM_ABI _Tp* fetch_add(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_add(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
+ }
+ _LIBCPP_HIDE_FROM_ABI _Tp* fetch_sub(ptrdiff_t __arg, memory_order __order = memory_order_seq_cst) const noexcept {
+ return __atomic_fetch_sub(this->__ptr_, __arg * sizeof(_Tp), std::__to_gcc_order(__order));
+ }
+
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator++(int) const noexcept { return fetch_add(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator--(int) const noexcept { return fetch_sub(1); }
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator++() const noexcept { return fetch_add(1) + 1; }
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator--() const noexcept { return fetch_sub(1) - 1; }
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator+=(ptrdiff_t __arg) const noexcept { return fetch_add(__arg) + __arg; }
+ _LIBCPP_HIDE_FROM_ABI _Tp* operator-=(ptrdiff_t __arg) const noexcept { return fetch_sub(__arg) - __arg; }
+};
+
+_LIBCPP_CTAD_SUPPORTED_FOR_TYPE(atomic_ref);
+
+#endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+_LIBCPP_POP_MACROS
+
+#endif // _LIBCPP___ATOMIC_ATOMIC_REF_H
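This header supplies the std::atomic_ref surface recorded as complete in the release notes and status tables above. A short user-level sketch of what it enables (plain C++20 usage, independent of this patch):

// std::atomic_ref lets threads perform atomic operations on an ordinary,
// non-atomic object for as long as references to it exist (C++20).
#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

int main() {
  alignas(std::atomic_ref<long>::required_alignment) long counter = 0;

  std::vector<std::thread> workers;
  for (int i = 0; i < 4; ++i)
    workers.emplace_back([&counter] {
      std::atomic_ref<long> ref(counter);   // view the plain long atomically
      for (int j = 0; j < 1000; ++j)
        ref.fetch_add(1, std::memory_order_relaxed);
    });
  for (auto &t : workers)
    t.join();

  std::printf("%ld\n", counter);            // 4000: no updates were lost
  return 0;
}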
diff --git a/libcxx/include/__atomic/atomic_sync.h b/libcxx/include/__atomic/atomic_sync.h
index e583dca38c4c..175700be54c0 100644
--- a/libcxx/include/__atomic/atomic_sync.h
+++ b/libcxx/include/__atomic/atomic_sync.h
@@ -12,6 +12,7 @@
#include <__atomic/contention_t.h>
#include <__atomic/cxx_atomic_impl.h>
#include <__atomic/memory_order.h>
+#include <__atomic/to_gcc_order.h>
#include <__availability>
#include <__chrono/duration.h>
#include <__config>
diff --git a/libcxx/include/__atomic/check_memory_order.h b/libcxx/include/__atomic/check_memory_order.h
index 3012aec0521b..536f764a6190 100644
--- a/libcxx/include/__atomic/check_memory_order.h
+++ b/libcxx/include/__atomic/check_memory_order.h
@@ -27,4 +27,8 @@
_LIBCPP_DIAGNOSE_WARNING(__f == memory_order_release || __f == memory_order_acq_rel, \
"memory order argument to atomic operation is invalid")
+#define _LIBCPP_CHECK_WAIT_MEMORY_ORDER(__m) \
+ _LIBCPP_DIAGNOSE_WARNING(__m == memory_order_release || __m == memory_order_acq_rel, \
+ "memory order argument to atomic operation is invalid")
+
#endif // _LIBCPP___ATOMIC_CHECK_MEMORY_ORDER_H
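The added _LIBCPP_CHECK_WAIT_MEMORY_ORDER gives wait() the same compile-time order checking that load() already has. A user-side illustration of the constraint it encodes (the diagnostic itself depends on the compiler's diagnose_if support, as with the existing checks in this header):

#include <atomic>

void example(int &value) {
  std::atomic_ref<int> ref(value);
  ref.wait(0, std::memory_order_acquire);    // valid: wait accepts the load orders
  // ref.wait(0, std::memory_order_release); // invalid: release is not an allowed
                                             // order for an atomic wait; the new
                                             // macro is meant to flag this when the
                                             // order is a constant expression
}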
diff --git a/libcxx/include/__atomic/cxx_atomic_impl.h b/libcxx/include/__atomic/cxx_atomic_impl.h
index b900cc135f78..18e88aa97bec 100644
--- a/libcxx/include/__atomic/cxx_atomic_impl.h
+++ b/libcxx/include/__atomic/cxx_atomic_impl.h
@@ -10,6 +10,7 @@
#define _LIBCPP___ATOMIC_CXX_ATOMIC_IMPL_H
#include <__atomic/memory_order.h>
+#include <__atomic/to_gcc_order.h>
#include <__config>
#include <__memory/addressof.h>
#include <__type_traits/is_assignable.h>
@@ -54,32 +55,6 @@ struct __cxx_atomic_base_impl {
_Tp __a_value;
};
-_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
- // Avoid switch statement to make this a constexpr.
- return __order == memory_order_relaxed
- ? __ATOMIC_RELAXED
- : (__order == memory_order_acquire
- ? __ATOMIC_ACQUIRE
- : (__order == memory_order_release
- ? __ATOMIC_RELEASE
- : (__order == memory_order_seq_cst
- ? __ATOMIC_SEQ_CST
- : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME))));
-}
-
-_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) {
- // Avoid switch statement to make this a constexpr.
- return __order == memory_order_relaxed
- ? __ATOMIC_RELAXED
- : (__order == memory_order_acquire
- ? __ATOMIC_ACQUIRE
- : (__order == memory_order_release
- ? __ATOMIC_RELAXED
- : (__order == memory_order_seq_cst
- ? __ATOMIC_SEQ_CST
- : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME))));
-}
-
template <typename _Tp>
_LIBCPP_HIDE_FROM_ABI void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
__cxx_atomic_assign_volatile(__a->__a_value, __val);
diff --git a/libcxx/include/__atomic/to_gcc_order.h b/libcxx/include/__atomic/to_gcc_order.h
new file mode 100644
index 000000000000..d04c111addd3
--- /dev/null
+++ b/libcxx/include/__atomic/to_gcc_order.h
@@ -0,0 +1,54 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___ATOMIC_TO_GCC_ORDER_H
+#define _LIBCPP___ATOMIC_TO_GCC_ORDER_H
+
+#include <__atomic/memory_order.h>
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+# pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if defined(__ATOMIC_RELAXED) && defined(__ATOMIC_CONSUME) && defined(__ATOMIC_ACQUIRE) && \
+ defined(__ATOMIC_RELEASE) && defined(__ATOMIC_ACQ_REL) && defined(__ATOMIC_SEQ_CST)
+
+_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
+ // Avoid switch statement to make this a constexpr.
+ return __order == memory_order_relaxed
+ ? __ATOMIC_RELAXED
+ : (__order == memory_order_acquire
+ ? __ATOMIC_ACQUIRE
+ : (__order == memory_order_release
+ ? __ATOMIC_RELEASE
+ : (__order == memory_order_seq_cst
+ ? __ATOMIC_SEQ_CST
+ : (__order == memory_order_acq_rel ? __ATOMIC_ACQ_REL : __ATOMIC_CONSUME))));
+}
+
+_LIBCPP_HIDE_FROM_ABI inline _LIBCPP_CONSTEXPR int __to_gcc_failure_order(memory_order __order) {
+ // Avoid switch statement to make this a constexpr.
+ return __order == memory_order_relaxed
+ ? __ATOMIC_RELAXED
+ : (__order == memory_order_acquire
+ ? __ATOMIC_ACQUIRE
+ : (__order == memory_order_release
+ ? __ATOMIC_RELAXED
+ : (__order == memory_order_seq_cst
+ ? __ATOMIC_SEQ_CST
+ : (__order == memory_order_acq_rel ? __ATOMIC_ACQUIRE : __ATOMIC_CONSUME))));
+}
+
+#endif
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___ATOMIC_TO_GCC_ORDER_H
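__to_gcc_order and __to_gcc_failure_order translate std::memory_order values into the __ATOMIC_* constants consumed by the GCC/Clang __atomic_* builtins; moving them into their own header lets atomic_ref.h and atomic_sync.h share them without pulling in cxx_atomic_impl.h. The failure-order variant downgrades release to relaxed and acq_rel to acquire because a failed compare-exchange performs no store. A small illustrative sketch of the kind of call site these helpers feed, with the constant spelled directly rather than via the internal helper:

int load_acquire(const int* p) {
  // Inside libc++ the last argument would be std::__to_gcc_order(memory_order_acquire).
  return __atomic_load_n(p, __ATOMIC_ACQUIRE);
}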
diff --git a/libcxx/include/__type_traits/has_unique_object_representation.h b/libcxx/include/__type_traits/has_unique_object_representation.h
index c0ada5618f0e..1aa044990032 100644
--- a/libcxx/include/__type_traits/has_unique_object_representation.h
+++ b/libcxx/include/__type_traits/has_unique_object_representation.h
@@ -11,8 +11,6 @@
#include <__config>
#include <__type_traits/integral_constant.h>
-#include <__type_traits/remove_all_extents.h>
-#include <__type_traits/remove_cv.h>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
@@ -24,10 +22,10 @@ _LIBCPP_BEGIN_NAMESPACE_STD
template <class _Tp>
struct _LIBCPP_TEMPLATE_VIS has_unique_object_representations
- : public integral_constant<bool, __has_unique_object_representations(remove_cv_t<remove_all_extents_t<_Tp>>)> {};
+ : public integral_constant<bool, __has_unique_object_representations(_Tp)> {};
template <class _Tp>
-inline constexpr bool has_unique_object_representations_v = has_unique_object_representations<_Tp>::value;
+inline constexpr bool has_unique_object_representations_v = __has_unique_object_representations(_Tp);
#endif
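Dropping remove_cv/remove_all_extents here relies on the __has_unique_object_representations builtin applying the standard's array and cv handling itself, so forwarding _Tp unchanged should be behaviour-preserving. A small sketch of the observable behaviour, assuming a typical ABI where int has no padding bits:

#include <type_traits>

static_assert(std::has_unique_object_representations_v<const int[4]>,
              "cv-qualifiers and array extents are stripped before the check");
static_assert(!std::has_unique_object_representations_v<float>,
              "distinct bit patterns (e.g. +0.0 and -0.0) compare equal for float");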
diff --git a/libcxx/include/atomic b/libcxx/include/atomic
index cb142b09bff3..80a0f9ee373e 100644
--- a/libcxx/include/atomic
+++ b/libcxx/include/atomic
@@ -599,6 +599,7 @@ template <class T>
#include <__atomic/atomic_flag.h>
#include <__atomic/atomic_init.h>
#include <__atomic/atomic_lock_free.h>
+#include <__atomic/atomic_ref.h>
#include <__atomic/atomic_sync.h>
#include <__atomic/check_memory_order.h>
#include <__atomic/contention_t.h>
diff --git a/libcxx/include/libcxx.imp b/libcxx/include/libcxx.imp
new file mode 100644
index 000000000000..f6aa1ea6b62b
--- /dev/null
+++ b/libcxx/include/libcxx.imp
@@ -0,0 +1,869 @@
+[
+ { include: [ "<__algorithm/adjacent_find.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/all_of.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/any_of.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/binary_search.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/clamp.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/comp.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/comp_ref_type.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/copy_backward.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/copy_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/copy_move_common.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/copy_n.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/count.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/count_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/equal.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/equal_range.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/fill.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/fill_n.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/find.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/find_end.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/find_first_of.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/find_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/find_if_not.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/find_segment_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/fold.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/for_each.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/for_each_n.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/for_each_segment.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/generate.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/generate_n.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/half_positive.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/in_found_result.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/in_fun_result.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/in_in_out_result.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/in_in_result.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/in_out_out_result.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/in_out_result.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/includes.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/inplace_merge.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/is_heap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/is_heap_until.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/is_partitioned.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/is_permutation.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/is_sorted.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/is_sorted_until.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/iter_swap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/iterator_operations.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/lexicographical_compare.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/lexicographical_compare_three_way.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/lower_bound.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/make_heap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/make_projected.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/max.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/max_element.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/merge.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/min.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/min_element.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/min_max_result.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/minmax.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/minmax_element.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/mismatch.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/move.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/move_backward.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/next_permutation.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/none_of.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/nth_element.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/partial_sort.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/partial_sort_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/partition.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/partition_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/partition_point.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pop_heap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/prev_permutation.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_any_all_none_of.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backend.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backend.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/any_of.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/backend.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/fill.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/find_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/for_each.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/libdispatch.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/merge.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/serial.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/stable_sort.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/thread.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/transform.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_backends/cpu_backends/transform_reduce.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_count.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_equal.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_fill.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_find.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_for_each.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_frontend_dispatch.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_generate.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_is_partitioned.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_merge.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_move.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_replace.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_rotate_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_sort.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_stable_sort.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/pstl_transform.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/push_heap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_adjacent_find.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_all_of.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_any_of.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_binary_search.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_clamp.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_contains.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_contains_subrange.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_copy_backward.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_copy_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_copy_n.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_count.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_count_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_ends_with.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_equal.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_equal_range.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_fill.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_fill_n.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_find.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_find_end.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_find_first_of.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_find_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_find_if_not.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_for_each.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_for_each_n.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_generate.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_generate_n.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_includes.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_inplace_merge.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_is_heap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_is_heap_until.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_is_partitioned.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_is_permutation.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_is_sorted.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_is_sorted_until.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_iterator_concept.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_lexicographical_compare.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_lower_bound.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_make_heap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_max.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_max_element.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_merge.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_min.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_min_element.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_minmax.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_minmax_element.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_mismatch.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_move.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_move_backward.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_next_permutation.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_none_of.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_nth_element.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_partial_sort.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_partial_sort_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_partition.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_partition_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_partition_point.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_pop_heap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_prev_permutation.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_push_heap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_remove.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_remove_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_remove_copy_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_remove_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_replace.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_replace_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_replace_copy_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_replace_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_reverse.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_reverse_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_rotate.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_rotate_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_sample.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_search.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_search_n.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_set_difference.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_set_intersection.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_set_symmetric_difference.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_set_union.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_shuffle.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_sort.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_sort_heap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_stable_partition.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_stable_sort.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_starts_with.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_swap_ranges.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_transform.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_unique.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_unique_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/ranges_upper_bound.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/remove.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/remove_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/remove_copy_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/remove_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/replace.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/replace_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/replace_copy_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/replace_if.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/reverse.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/reverse_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/rotate.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/rotate_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/sample.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/search.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/search_n.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/set_difference.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/set_intersection.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/set_symmetric_difference.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/set_union.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/shift_left.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/shift_right.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/shuffle.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/sift_down.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/simd_utils.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/sort.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/sort_heap.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/stable_partition.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/stable_sort.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/swap_ranges.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/three_way_comp_ref_type.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/transform.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/uniform_random_bit_generator_adaptor.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/unique.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/unique_copy.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/unwrap_iter.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/unwrap_range.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__algorithm/upper_bound.h>", "private", "<algorithm>", "public" ] },
+ { include: [ "<__atomic/aliases.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/atomic.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/atomic_base.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/atomic_flag.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/atomic_init.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/atomic_lock_free.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/atomic_ref.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/atomic_sync.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/check_memory_order.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/contention_t.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/cxx_atomic_impl.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/fence.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/is_always_lock_free.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/kill_dependency.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/memory_order.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__atomic/to_gcc_order.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__bit/bit_cast.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/bit_ceil.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/bit_floor.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/bit_log2.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/bit_width.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/blsr.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/byteswap.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/countl.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/countr.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/endian.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/has_single_bit.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/invert_if.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/popcount.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__bit/rotate.h>", "private", "<bit>", "public" ] },
+ { include: [ "<__charconv/chars_format.h>", "private", "<charconv>", "public" ] },
+ { include: [ "<__charconv/from_chars_integral.h>", "private", "<charconv>", "public" ] },
+ { include: [ "<__charconv/from_chars_result.h>", "private", "<charconv>", "public" ] },
+ { include: [ "<__charconv/tables.h>", "private", "<charconv>", "public" ] },
+ { include: [ "<__charconv/to_chars.h>", "private", "<charconv>", "public" ] },
+ { include: [ "<__charconv/to_chars_base_10.h>", "private", "<charconv>", "public" ] },
+ { include: [ "<__charconv/to_chars_floating_point.h>", "private", "<charconv>", "public" ] },
+ { include: [ "<__charconv/to_chars_integral.h>", "private", "<charconv>", "public" ] },
+ { include: [ "<__charconv/to_chars_result.h>", "private", "<charconv>", "public" ] },
+ { include: [ "<__charconv/traits.h>", "private", "<charconv>", "public" ] },
+ { include: [ "<__chrono/calendar.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/concepts.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/convert_to_timespec.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/convert_to_tm.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/day.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/duration.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/file_clock.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/formatter.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/hh_mm_ss.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/high_resolution_clock.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/leap_second.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/literals.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/month.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/month_weekday.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/monthday.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/ostream.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/parser_std_format_spec.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/statically_widen.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/steady_clock.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/sys_info.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/system_clock.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/time_point.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/time_zone.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/time_zone_link.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/tzdb.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/tzdb_list.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/weekday.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/year.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/year_month.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/year_month_day.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__chrono/year_month_weekday.h>", "private", "<chrono>", "public" ] },
+ { include: [ "<__compare/common_comparison_category.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/compare_partial_order_fallback.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/compare_strong_order_fallback.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/compare_three_way.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/compare_three_way_result.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/compare_weak_order_fallback.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/is_eq.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/ordering.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/partial_order.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/strong_order.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/synth_three_way.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/three_way_comparable.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__compare/weak_order.h>", "private", "<compare>", "public" ] },
+ { include: [ "<__concepts/arithmetic.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/assignable.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/boolean_testable.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/class_or_enum.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/common_reference_with.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/common_with.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/constructible.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/convertible_to.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/copyable.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/derived_from.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/destructible.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/different_from.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/equality_comparable.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/invocable.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/movable.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/predicate.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/regular.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/relation.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/same_as.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/semiregular.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/swappable.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__concepts/totally_ordered.h>", "private", "<concepts>", "public" ] },
+ { include: [ "<__condition_variable/condition_variable.h>", "private", "<condition_variable>", "public" ] },
+ { include: [ "<__coroutine/coroutine_handle.h>", "private", "<coroutine>", "public" ] },
+ { include: [ "<__coroutine/coroutine_traits.h>", "private", "<coroutine>", "public" ] },
+ { include: [ "<__coroutine/noop_coroutine_handle.h>", "private", "<coroutine>", "public" ] },
+ { include: [ "<__coroutine/trivial_awaitables.h>", "private", "<coroutine>", "public" ] },
+ { include: [ "<__exception/exception.h>", "private", "<exception>", "public" ] },
+ { include: [ "<__exception/exception_ptr.h>", "private", "<exception>", "public" ] },
+ { include: [ "<__exception/nested_exception.h>", "private", "<exception>", "public" ] },
+ { include: [ "<__exception/operations.h>", "private", "<exception>", "public" ] },
+ { include: [ "<__exception/terminate.h>", "private", "<exception>", "public" ] },
+ { include: [ "<__expected/bad_expected_access.h>", "private", "<expected>", "public" ] },
+ { include: [ "<__expected/expected.h>", "private", "<expected>", "public" ] },
+ { include: [ "<__expected/unexpect.h>", "private", "<expected>", "public" ] },
+ { include: [ "<__expected/unexpected.h>", "private", "<expected>", "public" ] },
+ { include: [ "<__filesystem/copy_options.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/directory_entry.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/directory_iterator.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/directory_options.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/file_status.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/file_time_type.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/file_type.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/filesystem_error.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/operations.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/path.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/path_iterator.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/perm_options.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/perms.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/recursive_directory_iterator.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/space_info.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__filesystem/u8path.h>", "private", "<filesystem>", "public" ] },
+ { include: [ "<__format/buffer.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/concepts.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/container_adaptor.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/enable_insertable.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/escaped_output_table.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/extended_grapheme_cluster_table.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/format_arg.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/format_arg_store.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/format_args.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/format_context.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/format_error.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/format_functions.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/format_parse_context.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/format_string.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/format_to_n_result.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/formatter.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/formatter_bool.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/formatter_char.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/formatter_floating_point.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/formatter_integer.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/formatter_integral.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/formatter_output.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/formatter_pointer.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/formatter_string.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/formatter_tuple.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/indic_conjunct_break_table.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/parser_std_format_spec.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/range_default_formatter.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/range_formatter.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/unicode.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/width_estimation_table.h>", "private", "<format>", "public" ] },
+ { include: [ "<__format/write_escaped.h>", "private", "<format>", "public" ] },
+ { include: [ "<__functional/binary_function.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/binary_negate.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/bind.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/bind_back.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/bind_front.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/binder1st.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/binder2nd.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/boyer_moore_searcher.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/compose.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/default_searcher.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/function.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/hash.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/identity.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/invoke.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/is_transparent.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/mem_fn.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/mem_fun_ref.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/not_fn.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/operations.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/perfect_forward.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/pointer_to_binary_function.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/pointer_to_unary_function.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/ranges_operations.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/reference_wrapper.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/unary_function.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/unary_negate.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__functional/weak_result_type.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__fwd/array.h>", "private", "<array>", "public" ] },
+ { include: [ "<__fwd/bit_reference.h>", "private", "<bitset>", "public" ] },
+ { include: [ "<__fwd/bit_reference.h>", "private", "<vector>", "public" ] },
+ { include: [ "<__fwd/complex.h>", "private", "<complex>", "public" ] },
+ { include: [ "<__fwd/deque.h>", "private", "<deque>", "public" ] },
+ { include: [ "<__fwd/format.h>", "private", "<format>", "public" ] },
+ { include: [ "<__fwd/fstream.h>", "private", "<iosfwd>", "public" ] },
+ { include: [ "<__fwd/functional.h>", "private", "<functional>", "public" ] },
+ { include: [ "<__fwd/ios.h>", "private", "<iosfwd>", "public" ] },
+ { include: [ "<__fwd/istream.h>", "private", "<iosfwd>", "public" ] },
+ { include: [ "<__fwd/mdspan.h>", "private", "<mdspan>", "public" ] },
+ { include: [ "<__fwd/memory.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__fwd/memory_resource.h>", "private", "<memory_resource>", "public" ] },
+ { include: [ "<__fwd/ostream.h>", "private", "<iosfwd>", "public" ] },
+ { include: [ "<__fwd/pair.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__fwd/queue.h>", "private", "<queue>", "public" ] },
+ { include: [ "<__fwd/span.h>", "private", "<span>", "public" ] },
+ { include: [ "<__fwd/sstream.h>", "private", "<iosfwd>", "public" ] },
+ { include: [ "<__fwd/stack.h>", "private", "<stack>", "public" ] },
+ { include: [ "<__fwd/streambuf.h>", "private", "<iosfwd>", "public" ] },
+ { include: [ "<__fwd/string.h>", "private", "<string>", "public" ] },
+ { include: [ "<__fwd/string_view.h>", "private", "<string_view>", "public" ] },
+ { include: [ "<__fwd/subrange.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__fwd/tuple.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__fwd/vector.h>", "private", "<vector>", "public" ] },
+ { include: [ "<__ios/fpos.h>", "private", "<ios>", "public" ] },
+ { include: [ "<__iterator/access.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/advance.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/back_insert_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/bounded_iter.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/common_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/concepts.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/counted_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/cpp17_iterator_concepts.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/data.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/default_sentinel.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/distance.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/empty.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/erase_if_container.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/front_insert_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/incrementable_traits.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/indirectly_comparable.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/insert_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/istream_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/istreambuf_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/iter_move.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/iter_swap.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/iterator_traits.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/iterator_with_data.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/mergeable.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/move_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/move_sentinel.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/next.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/ostream_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/ostreambuf_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/permutable.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/prev.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/projected.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/ranges_iterator_traits.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/readable_traits.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/reverse_access.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/reverse_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/segmented_iterator.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/size.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/sortable.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/unreachable_sentinel.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__iterator/wrap_iter.h>", "private", "<iterator>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api/android.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api/bsd_locale_defaults.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api/bsd_locale_fallbacks.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api/fuchsia.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api/ibm.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api/locale_guard.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api/musl.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api/newlib.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api/openbsd.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__locale_dir/locale_base_api/win32.h>", "private", "<locale>", "public" ] },
+ { include: [ "<__math/abs.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/copysign.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/error_functions.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/exponential_functions.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/fdim.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/fma.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/gamma.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/hyperbolic_functions.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/hypot.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/inverse_hyperbolic_functions.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/inverse_trigonometric_functions.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/logarithms.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/min_max.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/modulo.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/remainder.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/roots.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/rounding_functions.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/traits.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__math/trigonometric_functions.h>", "private", "<cmath>", "public" ] },
+ { include: [ "<__mdspan/default_accessor.h>", "private", "<mdspan>", "public" ] },
+ { include: [ "<__mdspan/extents.h>", "private", "<mdspan>", "public" ] },
+ { include: [ "<__mdspan/layout_left.h>", "private", "<mdspan>", "public" ] },
+ { include: [ "<__mdspan/layout_right.h>", "private", "<mdspan>", "public" ] },
+ { include: [ "<__mdspan/layout_stride.h>", "private", "<mdspan>", "public" ] },
+ { include: [ "<__mdspan/mdspan.h>", "private", "<mdspan>", "public" ] },
+ { include: [ "<__memory/addressof.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/align.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/aligned_alloc.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/allocate_at_least.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/allocation_guard.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/allocator.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/allocator_arg_t.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/allocator_destructor.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/allocator_traits.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/assume_aligned.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/auto_ptr.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/builtin_new_allocator.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/compressed_pair.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/concepts.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/construct_at.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/destruct_n.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/pointer_traits.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/ranges_construct_at.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/ranges_uninitialized_algorithms.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/raw_storage_iterator.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/shared_ptr.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/swap_allocator.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/temp_value.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/temporary_buffer.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/uninitialized_algorithms.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/unique_ptr.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/uses_allocator.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/uses_allocator_construction.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory/voidify.h>", "private", "<memory>", "public" ] },
+ { include: [ "<__memory_resource/memory_resource.h>", "private", "<memory_resource>", "public" ] },
+ { include: [ "<__memory_resource/monotonic_buffer_resource.h>", "private", "<memory_resource>", "public" ] },
+ { include: [ "<__memory_resource/polymorphic_allocator.h>", "private", "<memory_resource>", "public" ] },
+ { include: [ "<__memory_resource/pool_options.h>", "private", "<memory_resource>", "public" ] },
+ { include: [ "<__memory_resource/synchronized_pool_resource.h>", "private", "<memory_resource>", "public" ] },
+ { include: [ "<__memory_resource/unsynchronized_pool_resource.h>", "private", "<memory_resource>", "public" ] },
+ { include: [ "<__mutex/lock_guard.h>", "private", "<mutex>", "public" ] },
+ { include: [ "<__mutex/mutex.h>", "private", "<mutex>", "public" ] },
+ { include: [ "<__mutex/once_flag.h>", "private", "<mutex>", "public" ] },
+ { include: [ "<__mutex/tag_types.h>", "private", "<mutex>", "public" ] },
+ { include: [ "<__mutex/unique_lock.h>", "private", "<mutex>", "public" ] },
+ { include: [ "<__numeric/accumulate.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/adjacent_difference.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/exclusive_scan.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/gcd_lcm.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/inclusive_scan.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/inner_product.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/iota.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/midpoint.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/partial_sum.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/pstl_reduce.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/pstl_transform_reduce.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/reduce.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/saturation_arithmetic.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/transform_exclusive_scan.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/transform_inclusive_scan.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__numeric/transform_reduce.h>", "private", "<numeric>", "public" ] },
+ { include: [ "<__random/bernoulli_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/binomial_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/cauchy_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/chi_squared_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/clamp_to_integral.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/default_random_engine.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/discard_block_engine.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/discrete_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/exponential_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/extreme_value_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/fisher_f_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/gamma_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/generate_canonical.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/geometric_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/independent_bits_engine.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/is_seed_sequence.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/is_valid.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/knuth_b.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/linear_congruential_engine.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/log2.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/lognormal_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/mersenne_twister_engine.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/negative_binomial_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/normal_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/piecewise_constant_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/piecewise_linear_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/poisson_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/random_device.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/ranlux.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/seed_seq.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/shuffle_order_engine.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/student_t_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/subtract_with_carry_engine.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/uniform_int_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/uniform_random_bit_generator.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/uniform_real_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__random/weibull_distribution.h>", "private", "<random>", "public" ] },
+ { include: [ "<__ranges/access.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/all.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/as_rvalue_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/chunk_by_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/common_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/concepts.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/container_compatible_range.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/counted.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/dangling.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/data.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/drop_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/drop_while_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/elements_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/empty.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/empty_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/enable_borrowed_range.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/enable_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/filter_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/from_range.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/iota_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/istream_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/join_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/lazy_split_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/movable_box.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/non_propagating_cache.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/owning_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/range_adaptor.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/rbegin.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/ref_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/rend.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/repeat_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/reverse_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/single_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/size.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/split_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/subrange.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/take_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/take_while_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/to.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/transform_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/view_interface.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/views.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__ranges/zip_view.h>", "private", "<ranges>", "public" ] },
+ { include: [ "<__stop_token/atomic_unique_lock.h>", "private", "<stop_token>", "public" ] },
+ { include: [ "<__stop_token/intrusive_list_view.h>", "private", "<stop_token>", "public" ] },
+ { include: [ "<__stop_token/intrusive_shared_ptr.h>", "private", "<stop_token>", "public" ] },
+ { include: [ "<__stop_token/stop_callback.h>", "private", "<stop_token>", "public" ] },
+ { include: [ "<__stop_token/stop_source.h>", "private", "<stop_token>", "public" ] },
+ { include: [ "<__stop_token/stop_state.h>", "private", "<stop_token>", "public" ] },
+ { include: [ "<__stop_token/stop_token.h>", "private", "<stop_token>", "public" ] },
+ { include: [ "<__string/char_traits.h>", "private", "<string>", "public" ] },
+ { include: [ "<__string/constexpr_c_functions.h>", "private", "<string>", "public" ] },
+ { include: [ "<__string/extern_template_lists.h>", "private", "<string>", "public" ] },
+ { include: [ "<__system_error/errc.h>", "private", "<system_error>", "public" ] },
+ { include: [ "<__system_error/error_category.h>", "private", "<system_error>", "public" ] },
+ { include: [ "<__system_error/error_code.h>", "private", "<system_error>", "public" ] },
+ { include: [ "<__system_error/error_condition.h>", "private", "<system_error>", "public" ] },
+ { include: [ "<__system_error/system_error.h>", "private", "<system_error>", "public" ] },
+ { include: [ "<__thread/formatter.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/id.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/jthread.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/poll_with_backoff.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/support.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__thread/support.h>", "private", "<mutex>", "public" ] },
+ { include: [ "<__thread/support.h>", "private", "<semaphore>", "public" ] },
+ { include: [ "<__thread/support.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/support/c11.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__thread/support/c11.h>", "private", "<mutex>", "public" ] },
+ { include: [ "<__thread/support/c11.h>", "private", "<semaphore>", "public" ] },
+ { include: [ "<__thread/support/c11.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/support/external.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__thread/support/external.h>", "private", "<mutex>", "public" ] },
+ { include: [ "<__thread/support/external.h>", "private", "<semaphore>", "public" ] },
+ { include: [ "<__thread/support/external.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/support/pthread.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__thread/support/pthread.h>", "private", "<mutex>", "public" ] },
+ { include: [ "<__thread/support/pthread.h>", "private", "<semaphore>", "public" ] },
+ { include: [ "<__thread/support/pthread.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/support/windows.h>", "private", "<atomic>", "public" ] },
+ { include: [ "<__thread/support/windows.h>", "private", "<mutex>", "public" ] },
+ { include: [ "<__thread/support/windows.h>", "private", "<semaphore>", "public" ] },
+ { include: [ "<__thread/support/windows.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/this_thread.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/thread.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__thread/timed_backoff_policy.h>", "private", "<thread>", "public" ] },
+ { include: [ "<__tuple/find_index.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__tuple/make_tuple_types.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__tuple/sfinae_helpers.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__tuple/tuple_element.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__tuple/tuple_indices.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__tuple/tuple_like.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__tuple/tuple_like_ext.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__tuple/tuple_like_no_subrange.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__tuple/tuple_size.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__tuple/tuple_types.h>", "private", "<tuple>", "public" ] },
+ { include: [ "<__type_traits/add_const.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/add_cv.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/add_lvalue_reference.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/add_pointer.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/add_rvalue_reference.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/add_volatile.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/aligned_storage.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/aligned_union.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/alignment_of.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/apply_cv.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/can_extract_key.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/common_reference.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/common_type.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/conditional.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/conjunction.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/copy_cv.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/copy_cvref.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/datasizeof.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/decay.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/dependent_type.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/desugars_to.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/disjunction.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/enable_if.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/extent.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/has_unique_object_representation.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/has_virtual_destructor.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/integral_constant.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/invoke.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_abstract.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_aggregate.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_allocator.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_always_bitcastable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_arithmetic.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_array.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_assignable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_base_of.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_bounded_array.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_callable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_char_like_type.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_class.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_compound.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_const.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_constant_evaluated.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_constructible.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_convertible.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_core_convertible.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_destructible.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_empty.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_enum.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_equality_comparable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_execution_policy.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_final.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_floating_point.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_function.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_fundamental.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_implicitly_default_constructible.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_integral.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_literal_type.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_member_function_pointer.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_member_object_pointer.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_member_pointer.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_nothrow_assignable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_nothrow_constructible.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_nothrow_convertible.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_nothrow_destructible.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_null_pointer.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_object.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_pod.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_pointer.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_polymorphic.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_primary_template.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_reference.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_reference_wrapper.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_referenceable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_same.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_scalar.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_scoped_enum.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_signed.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_signed_integer.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_specialization.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_standard_layout.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_swappable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_trivial.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_trivially_assignable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_trivially_constructible.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_trivially_copyable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_trivially_destructible.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_trivially_lexicographically_comparable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_trivially_relocatable.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_unbounded_array.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_union.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_unsigned.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_unsigned_integer.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_valid_expansion.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_void.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/is_volatile.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/lazy.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/make_32_64_or_128_bit.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/make_const_lvalue_ref.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/make_signed.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/make_unsigned.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/maybe_const.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/nat.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/negation.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/noexcept_move_assign_container.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/promote.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/rank.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/remove_all_extents.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/remove_const.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/remove_const_ref.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/remove_cv.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/remove_cvref.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/remove_extent.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/remove_pointer.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/remove_reference.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/remove_volatile.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/result_of.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/strip_signature.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/type_identity.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/type_list.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/underlying_type.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/unwrap_ref.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__type_traits/void_t.h>", "private", "<type_traits>", "public" ] },
+ { include: [ "<__utility/as_const.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/as_lvalue.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/auto_cast.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/cmp.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/convert_to_integral.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/declval.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/empty.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/exception_guard.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/exchange.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/forward.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/forward_like.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/in_place.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/integer_sequence.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/is_pointer_in_range.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/move.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/no_destroy.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/pair.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/piecewise_construct.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/priority_tag.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/rel_ops.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/small_buffer.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/swap.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/to_underlying.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__utility/unreachable.h>", "private", "<utility>", "public" ] },
+ { include: [ "<__variant/monostate.h>", "private", "<variant>", "public" ] },
+]
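
Each entry in this mapping follows include-what-you-use's .imp format: the first pair names a private libc++ detail header, the second names the public header user code should include instead. The practical effect is a sketch like the following (assuming IWYU is run over the translation unit): code that only names std::unique_ptr is told to keep <memory> rather than the <__memory/unique_ptr.h> detail header it may have been reaching transitively.

#include <memory> // public header kept by IWYU; <__memory/unique_ptr.h> stays an implementation detail

int main() {
  std::unique_ptr<int> p = std::make_unique<int>(42);
  return *p == 42 ? 0 : 1;
}
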
diff --git a/libcxx/include/locale b/libcxx/include/locale
index 748b276a8525..041d7bcd27fc 100644
--- a/libcxx/include/locale
+++ b/libcxx/include/locale
@@ -368,7 +368,11 @@ struct _LIBCPP_EXPORTED_FROM_ABI __num_get_base {
static const int __num_get_buf_sz = 40;
static int __get_base(ios_base&);
- static const char __src[33];
+ static const char __src[33]; // "0123456789abcdefABCDEFxX+-pPiInN"
+ // count of leading characters in __src used for parsing integers ("012..X+-")
+ static const size_t __int_chr_cnt = 26;
+ // count of leading characters in __src used for parsing floating-point values ("012..-pP")
+ static const size_t __fp_chr_cnt = 28;
};
_LIBCPP_EXPORTED_FROM_ABI void
@@ -431,7 +435,7 @@ private:
template <typename _Tp>
const _Tp* __do_widen_p(ios_base& __iob, _Tp* __atoms) const {
locale __loc = __iob.getloc();
- use_facet<ctype<_Tp> >(__loc).widen(__src, __src + 26, __atoms);
+ use_facet<ctype<_Tp> >(__loc).widen(__src, __src + __int_chr_cnt, __atoms);
return __atoms;
}
@@ -447,7 +451,7 @@ private:
template <class _CharT>
string __num_get<_CharT>::__stage2_int_prep(ios_base& __iob, _CharT* __atoms, _CharT& __thousands_sep) {
locale __loc = __iob.getloc();
- std::use_facet<ctype<_CharT> >(__loc).widen(__src, __src + 26, __atoms);
+ std::use_facet<ctype<_CharT> >(__loc).widen(__src, __src + __int_chr_cnt, __atoms);
const numpunct<_CharT>& __np = std::use_facet<numpunct<_CharT> >(__loc);
__thousands_sep = __np.thousands_sep();
return __np.grouping();
@@ -458,7 +462,7 @@ template <class _CharT>
string __num_get<_CharT>::__stage2_float_prep(
ios_base& __iob, _CharT* __atoms, _CharT& __decimal_point, _CharT& __thousands_sep) {
locale __loc = __iob.getloc();
- std::use_facet<ctype<_CharT> >(__loc).widen(__src, __src + 32, __atoms);
+ std::use_facet<ctype<_CharT> >(__loc).widen(__src, __src + __fp_chr_cnt, __atoms);
const numpunct<_CharT>& __np = std::use_facet<numpunct<_CharT> >(__loc);
__decimal_point = __np.decimal_point();
__thousands_sep = __np.thousands_sep();
@@ -490,7 +494,7 @@ __num_get<_CharT>::__stage2_int_loop(_CharT __ct, int __base, char* __a, char*&
}
return 0;
}
- ptrdiff_t __f = std::find(__atoms, __atoms + 26, __ct) - __atoms;
+ ptrdiff_t __f = std::find(__atoms, __atoms + __int_chr_cnt, __ct) - __atoms;
if (__f >= 24)
return -1;
switch (__base) {
@@ -546,8 +550,8 @@ int __num_get<_CharT>::__stage2_float_loop(
}
return 0;
}
- ptrdiff_t __f = std::find(__atoms, __atoms + 32, __ct) - __atoms;
- if (__f >= 32)
+ ptrdiff_t __f = std::find(__atoms, __atoms + __num_get_base::__fp_chr_cnt, __ct) - __atoms;
+ if (__f >= static_cast<ptrdiff_t>(__num_get_base::__fp_chr_cnt))
return -1;
char __x = __src[__f];
if (__x == '-' || __x == '+') {
@@ -846,7 +850,7 @@ _InputIterator num_get<_CharT, _InputIterator>::__do_get_signed(
int __base = this->__get_base(__iob);
// Stage 2
char_type __thousands_sep;
- const int __atoms_size = 26;
+ const int __atoms_size = __num_get_base::__int_chr_cnt;
#ifdef _LIBCPP_ABI_OPTIMIZED_LOCALE_NUM_GET
char_type __atoms1[__atoms_size];
const char_type* __atoms = this->__do_widen(__iob, __atoms1);
@@ -895,7 +899,7 @@ _InputIterator num_get<_CharT, _InputIterator>::__do_get_unsigned(
int __base = this->__get_base(__iob);
// Stage 2
char_type __thousands_sep;
- const int __atoms_size = 26;
+ const int __atoms_size = __num_get_base::__int_chr_cnt;
#ifdef _LIBCPP_ABI_OPTIMIZED_LOCALE_NUM_GET
char_type __atoms1[__atoms_size];
const char_type* __atoms = this->__do_widen(__iob, __atoms1);
@@ -942,7 +946,7 @@ _InputIterator num_get<_CharT, _InputIterator>::__do_get_floating_point(
iter_type __b, iter_type __e, ios_base& __iob, ios_base::iostate& __err, _Fp& __v) const {
// Stage 1, nothing to do
// Stage 2
- char_type __atoms[32];
+ char_type __atoms[__num_get_base::__fp_chr_cnt];
char_type __decimal_point;
char_type __thousands_sep;
string __grouping = this->__stage2_float_prep(__iob, __atoms, __decimal_point, __thousands_sep);
@@ -951,10 +955,11 @@ _InputIterator num_get<_CharT, _InputIterator>::__do_get_floating_point(
char* __a = &__buf[0];
char* __a_end = __a;
unsigned __g[__num_get_base::__num_get_buf_sz];
- unsigned* __g_end = __g;
- unsigned __dc = 0;
- bool __in_units = true;
- char __exp = 'E';
+ unsigned* __g_end = __g;
+ unsigned __dc = 0;
+ bool __in_units = true;
+ char __exp = 'E';
+ bool __is_leading_parsed = false;
for (; __b != __e; ++__b) {
if (__a_end == __a + __buf.size()) {
size_t __tmp = __buf.size();
@@ -977,6 +982,21 @@ _InputIterator num_get<_CharT, _InputIterator>::__do_get_floating_point(
__dc,
__atoms))
break;
+
+ // the leading character excluding the sign must be a decimal digit
+ if (!__is_leading_parsed) {
+ if (__a_end - __a >= 1 && __a[0] != '-' && __a[0] != '+') {
+ if ('0' <= __a[0] && __a[0] <= '9')
+ __is_leading_parsed = true;
+ else
+ break;
+ } else if (__a_end - __a >= 2 && (__a[0] == '-' || __a[0] == '+')) {
+ if ('0' <= __a[1] && __a[1] <= '9')
+ __is_leading_parsed = true;
+ else
+ break;
+ }
+ }
}
if (__grouping.size() != 0 && __in_units && __g_end - __g < __num_get_base::__num_get_buf_sz)
*__g_end++ = __dc;
@@ -996,10 +1016,11 @@ _InputIterator num_get<_CharT, _InputIterator>::do_get(
// Stage 1
int __base = 16;
// Stage 2
- char_type __atoms[26];
+ char_type __atoms[__num_get_base::__int_chr_cnt];
char_type __thousands_sep = char_type();
string __grouping;
- std::use_facet<ctype<_CharT> >(__iob.getloc()).widen(__num_get_base::__src, __num_get_base::__src + 26, __atoms);
+ std::use_facet<ctype<_CharT> >(__iob.getloc())
+ .widen(__num_get_base::__src, __num_get_base::__src + __num_get_base::__int_chr_cnt, __atoms);
string __buf;
__buf.resize(__buf.capacity());
char* __a = &__buf[0];
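
Two things change in this file: the magic lengths 26 and 32 are replaced by the named constants __int_chr_cnt and __fp_chr_cnt, which document how many leading characters of __src each parser actually uses, and __do_get_floating_point now stops accumulating once the first non-sign character is not a decimal digit, so inputs such as "p1" or "+e3" no longer produce a value. A minimal sketch of that acceptance rule outside libc++, with a hypothetical is_valid_float_prefix helper standing in for the inline check:

#include <cassert>
#include <cctype>
#include <cstddef>
#include <string>

// Hypothetical helper (not libc++ code): once a character beyond an optional
// leading sign has been accumulated, that character must be a decimal digit.
bool is_valid_float_prefix(const std::string& buf) {
  std::size_t first = (!buf.empty() && (buf[0] == '+' || buf[0] == '-')) ? 1 : 0;
  if (buf.size() <= first)
    return true; // only a sign (or nothing) so far -- keep accumulating
  return std::isdigit(static_cast<unsigned char>(buf[first])) != 0;
}

int main() {
  assert(is_valid_float_prefix("3.5e2"));
  assert(is_valid_float_prefix("-0.25"));
  assert(!is_valid_float_prefix("p1"));  // rejected by the new check
  assert(!is_valid_float_prefix("+e3"));
  return 0;
}
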
diff --git a/libcxx/include/module.modulemap b/libcxx/include/module.modulemap
index 70dac2f19846..8bc94d71391e 100644
--- a/libcxx/include/module.modulemap
+++ b/libcxx/include/module.modulemap
@@ -1066,7 +1066,11 @@ module std_private_atomic_atomic_flag [system] {
}
module std_private_atomic_atomic_init [system] { header "__atomic/atomic_init.h" }
module std_private_atomic_atomic_lock_free [system] { header "__atomic/atomic_lock_free.h" }
-module std_private_atomic_atomic_sync [system] { header "__atomic/atomic_sync.h" }
+module std_private_atomic_atomic_ref [system] { header "__atomic/atomic_ref.h" }
+module std_private_atomic_atomic_sync [system] {
+ header "__atomic/atomic_sync.h"
+ export std_private_atomic_to_gcc_order
+}
module std_private_atomic_check_memory_order [system] { header "__atomic/check_memory_order.h" }
module std_private_atomic_contention_t [system] { header "__atomic/contention_t.h" }
module std_private_atomic_cxx_atomic_impl [system] { header "__atomic/cxx_atomic_impl.h" }
@@ -1074,6 +1078,10 @@ module std_private_atomic_fence [system] { header "__atomic/fence.
module std_private_atomic_is_always_lock_free [system] { header "__atomic/is_always_lock_free.h" }
module std_private_atomic_kill_dependency [system] { header "__atomic/kill_dependency.h" }
module std_private_atomic_memory_order [system] { header "__atomic/memory_order.h" }
+module std_private_atomic_to_gcc_order [system] {
+ header "__atomic/to_gcc_order.h"
+ export std_private_atomic_memory_order
+}
module std_private_bit_bit_cast [system] { header "__bit/bit_cast.h" }
module std_private_bit_bit_ceil [system] { header "__bit/bit_ceil.h" }
diff --git a/libcxx/modules/std/atomic.inc b/libcxx/modules/std/atomic.inc
index d77d7a5bb0fb..e8cf90d01258 100644
--- a/libcxx/modules/std/atomic.inc
+++ b/libcxx/modules/std/atomic.inc
@@ -22,7 +22,7 @@ export namespace std {
// [atomics.ref.generic], class template atomic_ref
// [atomics.ref.pointer], partial specialization for pointers
- // using std::atomic_ref _LIBCPP_USING_IF_EXISTS;
+ using std::atomic_ref _LIBCPP_USING_IF_EXISTS;
// [atomics.types.generic], class template atomic
using std::atomic _LIBCPP_USING_IF_EXISTS;
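
Uncommenting the using-declaration makes std::atomic_ref part of what the std module exports, matching its availability through the classic headers. A minimal usage sketch (shown with #include <atomic>, since whether `import std;` is usable depends on the local module build):

#include <atomic>
#include <cassert>

int main() {
  int value = 1;
  std::atomic_ref<int> ref(value);             // atomic access to an ordinary int
  ref.fetch_add(2, std::memory_order_relaxed);
  assert(ref.load() == 3);
  assert(value == 3);                          // effects are visible through the referenced object
  return 0;
}
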
diff --git a/libcxx/src/chrono.cpp b/libcxx/src/chrono.cpp
index e7d6dfbc2292..83e8a64504ae 100644
--- a/libcxx/src/chrono.cpp
+++ b/libcxx/src/chrono.cpp
@@ -77,8 +77,8 @@ typedef void(WINAPI* GetSystemTimeAsFileTimePtr)(LPFILETIME);
class GetSystemTimeInit {
public:
GetSystemTimeInit() {
- fp =
- (GetSystemTimeAsFileTimePtr)GetProcAddress(GetModuleHandleW(L"kernel32.dll"), "GetSystemTimePreciseAsFileTime");
+ fp = (GetSystemTimeAsFileTimePtr)(void*)GetProcAddress(
+ GetModuleHandleW(L"kernel32.dll"), "GetSystemTimePreciseAsFileTime");
if (fp == nullptr)
fp = GetSystemTimeAsFileTime;
}
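
The chrono.cpp change only reshapes a cast: converting the FARPROC returned by GetProcAddress straight to another function-pointer type can trigger function-pointer cast warnings (e.g. -Wcast-function-type), so the result is routed through void* first; runtime behavior is unchanged. A platform-neutral sketch of the same pattern (casting a function pointer through void* is only conditionally supported by the standard, but reliable on the platforms involved):

// Sketch only: erase a function pointer to a generic type, then recover the real
// signature by casting through void* instead of directly between function types.
using GenericFn = void (*)();   // stands in for FARPROC
using AddOneFn  = int (*)(int);

int add_one(int x) { return x + 1; }

int main() {
  GenericFn erased   = reinterpret_cast<GenericFn>(&add_one); // mimics what GetProcAddress hands back
  AddOneFn  restored = reinterpret_cast<AddOneFn>(reinterpret_cast<void*>(erased));
  return restored(41) == 42 ? 0 : 1;
}
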
diff --git a/libcxx/src/locale.cpp b/libcxx/src/locale.cpp
index 1ca88e30f63a..c5ab6de5d657 100644
--- a/libcxx/src/locale.cpp
+++ b/libcxx/src/locale.cpp
@@ -102,8 +102,6 @@ inline constexpr size_t countof(const T* const begin, const T* const end) {
return static_cast<size_t>(end - begin);
}
-} // namespace
-
string build_name(const string& other, const string& one, locale::category c) {
if (other == "*" || one == "*")
return "*";
@@ -115,6 +113,8 @@ string build_name(const string& other, const string& one, locale::category c) {
return "*";
}
+} // namespace
+
const locale::category locale::none;
const locale::category locale::collate;
const locale::category locale::ctype;
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp
new file mode 100644
index 000000000000..066ed1191dd0
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_strong.pass.cpp
@@ -0,0 +1,58 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// bool compare_exchange_strong(T& expected, T desired, memory_order success, memory_order failure) const noexcept;
+//
+// Preconditions: failure is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "atomic_helpers.h"
+#include "check_assertion.h"
+
+template <typename T>
+struct TestCompareExchangeStrongInvalidMemoryOrder {
+ void operator()() const {
+ { // no assertion should trigger here
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_strong(t, T(3), std::memory_order_relaxed, std::memory_order_relaxed);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_strong(t, T(3), std::memory_order_relaxed, std::memory_order_release);
+ }()),
+ "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_strong(t, T(3), std::memory_order_relaxed, std::memory_order_acq_rel);
+ }()),
+ "atomic_ref: failure memory order argument to strong atomic compare-and-exchange operation is invalid");
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestCompareExchangeStrongInvalidMemoryOrder>()();
+ return 0;
+}
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp
new file mode 100644
index 000000000000..e83a143df3f0
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.compare_exchange_weak.pass.cpp
@@ -0,0 +1,58 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// bool compare_exchange_weak(T& expected, T desired, memory_order success, memory_order failure) const noexcept;
+//
+// Preconditions: failure is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "atomic_helpers.h"
+#include "check_assertion.h"
+
+template <typename T>
+struct TestCompareExchangeWeakInvalidMemoryOrder {
+ void operator()() const {
+ { // no assertion should trigger here
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_weak(t, T(3), std::memory_order_relaxed, std::memory_order_relaxed);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_weak(t, T(3), std::memory_order_relaxed, std::memory_order_release);
+ }()),
+ "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ T t(T(2));
+ a.compare_exchange_weak(t, T(3), std::memory_order_relaxed, std::memory_order_acq_rel);
+ }()),
+ "atomic_ref: failure memory order argument to weak atomic compare-and-exchange operation is invalid");
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestCompareExchangeWeakInvalidMemoryOrder>()();
+ return 0;
+}
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp
new file mode 100644
index 000000000000..ef3705d1db27
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.ctor.pass.cpp
@@ -0,0 +1,40 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+
+// <atomic>
+
+// atomic_ref(T& obj);
+//
+// Preconditions: The referenced object is aligned to required_alignment.
+
+#include <atomic>
+#include <cstddef>
+
+#include "check_assertion.h"
+
+int main(int, char**) {
+ { // no assertion should trigger here
+ alignas(float) std::byte c[sizeof(float)];
+ float* f = new (c) float(3.14f);
+ [[maybe_unused]] std::atomic_ref<float> r(*f);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ alignas(float) std::byte c[2 * sizeof(float)]; // intentionally larger
+ float* f = new (c + 1) float(3.14f); // intentionally misaligned
+ [[maybe_unused]] std::atomic_ref<float> r(*f);
+ }()),
+ "atomic_ref ctor: referenced object must be aligned to required_alignment");
+
+ return 0;
+}
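
The precondition exercised above is that the referenced object is aligned to std::atomic_ref<T>::required_alignment, which may be stricter than alignof(T). A small sketch of how user code can guarantee this, assuming an ordinary C++20 toolchain:

#include <atomic>
#include <cassert>

struct Counter {
  long hits = 0;
};

int main() {
  // Over-align the object to whatever atomic_ref requires for this type.
  alignas(std::atomic_ref<Counter>::required_alignment) Counter c;
  std::atomic_ref<Counter> ref(c); // constructor precondition satisfied
  ref.store(Counter{2});
  assert(ref.load().hits == 2);
  return 0;
}
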
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp
new file mode 100644
index 000000000000..bc92b3dc3622
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.load.pass.cpp
@@ -0,0 +1,55 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// T load(memory_order order = memory_order::seq_cst) const noexcept;
+//
+// Preconditions: order is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "atomic_helpers.h"
+#include "check_assertion.h"
+
+template <typename T>
+struct TestLoadInvalidMemoryOrder {
+ void operator()() const {
+ { // no assertion should trigger here
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ (void)a.load(std::memory_order_relaxed);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ (void)a.load(std::memory_order_release);
+ }()),
+ "atomic_ref: memory order argument to atomic load operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ (void)a.load(std::memory_order_acq_rel);
+ }()),
+ "atomic_ref: memory order argument to atomic load operation is invalid");
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestLoadInvalidMemoryOrder>()();
+ return 0;
+}
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp
new file mode 100644
index 000000000000..ab0d4a220c94
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.store.pass.cpp
@@ -0,0 +1,63 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// void store(T desired, memory_order order = memory_order::seq_cst) const noexcept;
+//
+// Preconditions: order is memory_order::relaxed, memory_order::release, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "atomic_helpers.h"
+#include "check_assertion.h"
+
+template <typename T>
+struct TestStoreInvalidMemoryOrder {
+ void operator()() const {
+ { // no assertion should trigger here
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.store(T(2), std::memory_order_relaxed);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.store(T(2), std::memory_order_consume);
+ }()),
+ "atomic_ref: memory order argument to atomic store operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.store(T(2), std::memory_order_acquire);
+ }()),
+ "atomic_ref: memory order argument to atomic store operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.store(T(2), std::memory_order_acq_rel);
+ }()),
+ "atomic_ref: memory order argument to atomic store operation is invalid");
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestStoreInvalidMemoryOrder>()();
+ return 0;
+}
diff --git a/libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp b/libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp
new file mode 100644
index 000000000000..dcec2fb62854
--- /dev/null
+++ b/libcxx/test/libcxx/atomics/atomics.ref/assert.wait.pass.cpp
@@ -0,0 +1,55 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// REQUIRES: has-unix-headers
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: libcpp-hardening-mode=none || libcpp-hardening-mode=fast
+// XFAIL: libcpp-hardening-mode=debug && availability-verbose_abort-missing
+// ADDITIONAL_COMPILE_FLAGS: -Wno-user-defined-warnings
+
+// <atomic>
+
+// void wait(T old, memory_order order = memory_order::seq_cst) const noexcept;
+//
+// Preconditions: order is memory_order::relaxed, memory_order::consume, memory_order::acquire, or memory_order::seq_cst.
+
+#include <atomic>
+
+#include "atomic_helpers.h"
+#include "check_assertion.h"
+
+template <typename T>
+struct TestWaitInvalidMemoryOrder {
+ void operator()() const {
+ { // no assertion should trigger here
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.wait(T(2), std::memory_order_relaxed);
+ }
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.wait(T(2), std::memory_order_release);
+ }()),
+ "atomic_ref: memory order argument to atomic wait operation is invalid");
+
+ TEST_LIBCPP_ASSERT_FAILURE(
+ ([] {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+ a.wait(T(2), std::memory_order_acq_rel);
+ }()),
+ "atomic_ref: memory order argument to atomic wait operation is invalid");
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestWaitInvalidMemoryOrder>()();
+ return 0;
+}
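
For context, the wait whose memory-order argument is validated above pairs with notify_one/notify_all for blocking synchronization on a plain object. A minimal sketch, assuming a C++20 implementation with atomic wait support:

#include <atomic>
#include <thread>

int main() {
  int flag = 0;
  std::atomic_ref<int> ref(flag);

  std::thread t([&] {
    ref.store(1, std::memory_order_release);
    ref.notify_one();                       // wake the waiter below
  });

  ref.wait(0, std::memory_order_acquire);   // blocks while the value is still 0
  t.join();
  return flag == 1 ? 0 : 1;
}
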
diff --git a/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp
new file mode 100644
index 000000000000..3887211752c6
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/assign.pass.cpp
@@ -0,0 +1,50 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// T operator=(T) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestAssign {
+ void operator()() const {
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a = T(2));
+ assert(y == T(2));
+ assert(x == T(2));
+
+ ASSERT_NOEXCEPT(a = T(0));
+ static_assert(std::is_nothrow_assignable_v<std::atomic_ref<T>, T>);
+
+ static_assert(!std::is_copy_assignable_v<std::atomic_ref<T>>);
+ }
+
+ {
+ auto assign = [](std::atomic_ref<T> const& y, T, T new_val) { y = new_val; };
+ auto load = [](std::atomic_ref<T> const& y) { return y.load(); };
+ test_seq_cst<T>(assign, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestAssign>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp
new file mode 100644
index 000000000000..2be1e9962880
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/bitwise_and_assign.pass.cpp
@@ -0,0 +1,60 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator&=(integral-type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_bitwise_and_assign = requires { std::declval<T const>() &= std::declval<T>(); };
+
+template <typename T>
+struct TestDoesNotHaveBitwiseAndAssign {
+ void operator()() const { static_assert(!has_bitwise_and_assign<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestBitwiseAndAssign {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a &= T(1));
+ assert(y == T(1));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(a &= T(0));
+
+ y = (a &= T(2));
+ assert(y == T(0));
+ assert(x == T(0));
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestBitwiseAndAssign>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveBitwiseAndAssign>()();
+
+ TestEachPointerType<TestDoesNotHaveBitwiseAndAssign>()();
+
+ TestDoesNotHaveBitwiseAndAssign<bool>()();
+ TestDoesNotHaveBitwiseAndAssign<UserAtomicType>()();
+ TestDoesNotHaveBitwiseAndAssign<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp
new file mode 100644
index 000000000000..5c22c8a2b2b6
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/bitwise_or_assign.pass.cpp
@@ -0,0 +1,56 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator|=(integral-type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_bitwise_or_assign = requires { std::declval<T const>() |= std::declval<T>(); };
+
+template <typename T>
+struct TestDoesNotHaveBitwiseOrAssign {
+ void operator()() const { static_assert(!has_bitwise_or_assign<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestBitwiseOrAssign {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a |= T(2));
+ assert(y == T(3));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a |= T(0));
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestBitwiseOrAssign>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveBitwiseOrAssign>()();
+
+ TestEachPointerType<TestDoesNotHaveBitwiseOrAssign>()();
+
+ TestDoesNotHaveBitwiseOrAssign<bool>()();
+ TestDoesNotHaveBitwiseOrAssign<UserAtomicType>()();
+ TestDoesNotHaveBitwiseOrAssign<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp b/libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp
new file mode 100644
index 000000000000..4dc4fd307f58
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/bitwise_xor_assign.pass.cpp
@@ -0,0 +1,56 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator^=(integral-type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_bitwise_xor_assign = requires { std::declval<T const>() ^= std::declval<T>(); };
+
+template <typename T>
+struct TestDoesNotHaveBitwiseXorAssign {
+  void operator()() const { static_assert(!has_bitwise_xor_assign<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestBitwiseXorAssign {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a ^= T(2));
+ assert(y == T(3));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a ^= T(0));
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestBitwiseXorAssign>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveBitwiseXorAssign>()();
+
+ TestEachPointerType<TestDoesNotHaveBitwiseXorAssign>()();
+
+ TestDoesNotHaveBitwiseXorAssign<bool>()();
+ TestDoesNotHaveBitwiseXorAssign<UserAtomicType>()();
+ TestDoesNotHaveBitwiseXorAssign<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp b/libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp
new file mode 100644
index 000000000000..72b2f444c476
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/compare_exchange_strong.pass.cpp
@@ -0,0 +1,221 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// bool compare_exchange_strong(T&, T, memory_order, memory_order) const noexcept;
+// bool compare_exchange_strong(T&, T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestCompareExchangeStrong {
+ void operator()() const {
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y = a.compare_exchange_strong(t, T(2));
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_strong(t, T(3));
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_strong(t, T(2)));
+ }
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y = a.compare_exchange_strong(t, T(2), std::memory_order_seq_cst);
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_strong(t, T(3), std::memory_order_seq_cst);
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_strong(t, T(2), std::memory_order_seq_cst));
+ }
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y =
+ a.compare_exchange_strong(t, T(2), std::memory_order_release, std::memory_order_relaxed);
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_strong(t, T(3), std::memory_order_release, std::memory_order_relaxed);
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_strong(t, T(2), std::memory_order_release, std::memory_order_relaxed));
+ }
+
+ // success memory_order::release
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::release, std::memory_order::relaxed);
+ assert(r);
+ };
+
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::release);
+ assert(r);
+ };
+ test_acquire_release<T>(store_one_arg, load);
+ }
+
+ // success memory_order::acquire
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acquire, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acquire)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load_one_arg);
+ }
+
+ // success memory_order::acq_rel
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::acq_rel, std::memory_order::relaxed);
+ assert(r);
+ };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acq_rel, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::acq_rel);
+ assert(r);
+ };
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::acq_rel)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store_one_arg, load_one_arg);
+ }
+
+ // success memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::seq_cst, std::memory_order::relaxed);
+ assert(r);
+ };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::seq_cst, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_seq_cst<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ auto r = x.compare_exchange_strong(old_val, new_val, std::memory_order::seq_cst);
+ assert(r);
+ };
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_strong(val, val, std::memory_order::seq_cst)) {
+ }
+ return val;
+ };
+ test_seq_cst<T>(store_one_arg, load_one_arg);
+ }
+
+ // failure memory_order::acquire
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
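+      // the expected value T(255) never matches the stored value, so the CAS
+      // always fails and only the failure ordering applies to its load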
+ bool r =
+ x.compare_exchange_strong(unexpected, unexpected, std::memory_order::relaxed, std::memory_order::acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r = x.compare_exchange_strong(unexpected, unexpected, std::memory_order::acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load_one_arg);
+
+    // for the failure ordering, acq_rel is replaced by acquire
+ auto load_one_arg_acq_rel = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r = x.compare_exchange_strong(unexpected, unexpected, std::memory_order::acq_rel);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load_one_arg_acq_rel);
+ }
+
+ // failure memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::seq_cst); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r =
+ x.compare_exchange_strong(unexpected, unexpected, std::memory_order::relaxed, std::memory_order::seq_cst);
+ assert(!r);
+ return result;
+ };
+ test_seq_cst<T>(store, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestCompareExchangeStrong>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp b/libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp
new file mode 100644
index 000000000000..5219a8e3714f
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/compare_exchange_weak.pass.cpp
@@ -0,0 +1,226 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// bool compare_exchange_weak(T&, T, memory_order, memory_order) const noexcept;
+// bool compare_exchange_weak(T&, T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <concepts>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestCompareExchangeWeak {
+ void operator()() const {
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
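+      // std::same_as<bool> decltype(auto) checks at compile time that the return type is exactly bool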
+ std::same_as<bool> decltype(auto) y = a.compare_exchange_weak(t, T(2));
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_weak(t, T(3));
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_weak(t, T(2)));
+ }
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y = a.compare_exchange_weak(t, T(2), std::memory_order_seq_cst);
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_weak(t, T(3), std::memory_order_seq_cst);
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_weak(t, T(2), std::memory_order_seq_cst));
+ }
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ T t(T(1));
+ std::same_as<bool> decltype(auto) y =
+ a.compare_exchange_weak(t, T(2), std::memory_order_release, std::memory_order_relaxed);
+ assert(y == true);
+ assert(a == T(2));
+ assert(t == T(1));
+ y = a.compare_exchange_weak(t, T(3), std::memory_order_release, std::memory_order_relaxed);
+ assert(y == false);
+ assert(a == T(2));
+ assert(t == T(2));
+
+ ASSERT_NOEXCEPT(a.compare_exchange_weak(t, T(2), std::memory_order_release, std::memory_order_relaxed));
+ }
+
+ // success memory_order::release
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::release, std::memory_order::relaxed)) {
+ }
+ };
+
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::release)) {
+ }
+ };
+ test_acquire_release<T>(store_one_arg, load);
+ }
+
+ // success memory_order::acquire
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acquire, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acquire)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load_one_arg);
+ }
+
+ // success memory_order::acq_rel
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::acq_rel, std::memory_order::relaxed)) {
+ }
+ };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acq_rel, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::acq_rel)) {
+ }
+ };
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::acq_rel)) {
+ }
+ return val;
+ };
+ test_acquire_release<T>(store_one_arg, load_one_arg);
+ }
+
+ // success memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::seq_cst, std::memory_order::relaxed)) {
+ }
+ };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::seq_cst, std::memory_order::relaxed)) {
+ }
+ return val;
+ };
+ test_seq_cst<T>(store, load);
+
+ auto store_one_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ // could fail spuriously, so put it in a loop
+ while (!x.compare_exchange_weak(old_val, new_val, std::memory_order::seq_cst)) {
+ }
+ };
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto val = x.load(std::memory_order::relaxed);
+ while (!x.compare_exchange_weak(val, val, std::memory_order::seq_cst)) {
+ }
+ return val;
+ };
+ test_seq_cst<T>(store_one_arg, load_one_arg);
+ }
+
+ // failure memory_order::acquire
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r =
+ x.compare_exchange_weak(unexpected, unexpected, std::memory_order::relaxed, std::memory_order::acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load);
+
+ auto load_one_arg = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r = x.compare_exchange_weak(unexpected, unexpected, std::memory_order::acquire);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load_one_arg);
+
+    // for the failure ordering, acq_rel is replaced by acquire
+ auto load_one_arg_acq_rel = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r = x.compare_exchange_weak(unexpected, unexpected, std::memory_order::acq_rel);
+ assert(!r);
+ return result;
+ };
+ test_acquire_release<T>(store, load_one_arg_acq_rel);
+ }
+
+ // failure memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::seq_cst); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ T unexpected(T(255));
+ bool r =
+ x.compare_exchange_weak(unexpected, unexpected, std::memory_order::relaxed, std::memory_order::seq_cst);
+ assert(!r);
+ return result;
+ };
+ test_seq_cst<T>(store, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestCompareExchangeWeak>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/convert.pass.cpp b/libcxx/test/std/atomics/atomics.ref/convert.pass.cpp
new file mode 100644
index 000000000000..2a58a5ea6ae2
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/convert.pass.cpp
@@ -0,0 +1,45 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// operator T() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestConvert {
+ void operator()() const {
+ T x(T(1));
+
+ T copy = x;
+ std::atomic_ref<T> const a(copy);
+
+ T converted = a;
+ assert(converted == x);
+
+ ASSERT_NOEXCEPT(T(a));
+ static_assert(std::is_nothrow_convertible_v<std::atomic_ref<T>, T>);
+
+ auto store = [](std::atomic_ref<T> const& y, T, T new_val) { y.store(new_val); };
+ auto load = [](std::atomic_ref<T> const& y) { return static_cast<T>(y); };
+ test_seq_cst<T>(store, load);
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestConvert>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp b/libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp
new file mode 100644
index 000000000000..d6c647406abf
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/ctor.pass.cpp
@@ -0,0 +1,37 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// explicit atomic_ref(T&);
+
+#include <atomic>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestCtor {
+ void operator()() const {
+ // check that the constructor is explicit
+ static_assert(!std::is_convertible_v<T, std::atomic_ref<T>>);
+ static_assert(std::is_constructible_v<std::atomic_ref<T>, T&>);
+
+ T x(T(0));
+ std::atomic_ref<T> a(x);
+ (void)a;
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestCtor>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp b/libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp
new file mode 100644
index 000000000000..24a399ac4711
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/deduction.pass.cpp
@@ -0,0 +1,33 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// explicit atomic_ref(T&);
+
+#include <atomic>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestDeduction {
+ void operator()() const {
+ T x(T(0));
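+    // class template argument deduction should deduce atomic_ref<T> from a T lvalue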
+ std::atomic_ref a(x);
+ ASSERT_SAME_TYPE(decltype(a), std::atomic_ref<T>);
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestDeduction>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp b/libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp
new file mode 100644
index 000000000000..cd998d46b7e8
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/exchange.pass.cpp
@@ -0,0 +1,45 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// T exchange(T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestExchange {
+ void operator()() const {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.exchange(T(2));
+ assert(y == T(1));
+ ASSERT_NOEXCEPT(a.exchange(T(2)));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.exchange(T(3), std::memory_order_seq_cst);
+ assert(y == T(2));
+ ASSERT_NOEXCEPT(a.exchange(T(3), std::memory_order_seq_cst));
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestExchange>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp
new file mode 100644
index 000000000000..908a6879bd06
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_add.pass.cpp
@@ -0,0 +1,113 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type fetch_add(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+// floating-point-type fetch_add(floating-point-type, memory_order = memory_order::seq_cst) const noexcept;
+// T* fetch_add(difference_type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_add = requires {
+ std::declval<T const>().fetch_add(std::declval<T>());
+ std::declval<T const>().fetch_add(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+template <typename T>
+struct TestDoesNotHaveFetchAdd {
+ void operator()() const { static_assert(!has_fetch_add<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestFetchAdd {
+ void operator()() const {
+ if constexpr (std::is_arithmetic_v<T>) {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_add(T(2));
+ assert(y == T(1));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.fetch_add(T(0)));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_add(T(4), std::memory_order_relaxed);
+ assert(y == T(3));
+ assert(x == T(7));
+ ASSERT_NOEXCEPT(a.fetch_add(T(0), std::memory_order_relaxed));
+ }
+ } else if constexpr (std::is_pointer_v<T>) {
+ using U = std::remove_pointer_t<T>;
+ U t[9] = {};
+ T p{&t[1]};
+ std::atomic_ref<T> const a(p);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_add(2);
+ assert(y == &t[1]);
+ assert(a == &t[3]);
+ ASSERT_NOEXCEPT(a.fetch_add(0));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_add(4, std::memory_order_relaxed);
+ assert(y == &t[3]);
+ assert(a == &t[7]);
+ ASSERT_NOEXCEPT(a.fetch_add(0, std::memory_order_relaxed));
+ }
+ } else {
+ static_assert(std::is_void_v<T>);
+ }
+
+ // memory_order::release
+ {
+ auto fetch_add = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ x.fetch_add(new_val - old_val, std::memory_order::release);
+ };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T>(fetch_add, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto fetch_add_no_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) { x.fetch_add(new_val - old_val); };
+ auto fetch_add_with_order = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ x.fetch_add(new_val - old_val, std::memory_order::seq_cst);
+ };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(); };
+ test_seq_cst<T>(fetch_add_no_arg, load);
+ test_seq_cst<T>(fetch_add_with_order, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestFetchAdd>()();
+
+ TestFetchAdd<float>()();
+ TestFetchAdd<double>()();
+
+ TestEachPointerType<TestFetchAdd>()();
+
+ TestDoesNotHaveFetchAdd<bool>()();
+ TestDoesNotHaveFetchAdd<UserAtomicType>()();
+ TestDoesNotHaveFetchAdd<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp
new file mode 100644
index 000000000000..8f0bec21fe72
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_and.pass.cpp
@@ -0,0 +1,69 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type fetch_and(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_and = requires {
+ std::declval<T const>().fetch_and(std::declval<T>());
+ std::declval<T const>().fetch_and(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+template <typename T>
+struct TestDoesNotHaveFetchAnd {
+ void operator()() const { static_assert(!has_fetch_and<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestFetchAnd {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_and(T(2));
+ assert(y == T(1));
+ assert(x == T(0));
+ ASSERT_NOEXCEPT(a.fetch_and(T(0)));
+ }
+
+ x = T(1);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_and(T(2), std::memory_order_relaxed);
+ assert(y == T(1));
+ assert(x == T(0));
+ ASSERT_NOEXCEPT(a.fetch_and(T(0), std::memory_order_relaxed));
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestFetchAnd>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveFetchAnd>()();
+
+ TestEachPointerType<TestDoesNotHaveFetchAnd>()();
+
+ TestDoesNotHaveFetchAnd<bool>()();
+ TestDoesNotHaveFetchAnd<UserAtomicType>()();
+ TestDoesNotHaveFetchAnd<LargeUserAtomicType>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp
new file mode 100644
index 000000000000..2045868fde42
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_or.pass.cpp
@@ -0,0 +1,68 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type fetch_or(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <concepts>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_or = requires {
+ std::declval<T const>().fetch_or(std::declval<T>());
+ std::declval<T const>().fetch_or(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+template <typename T>
+struct TestDoesNotHaveFetchOr {
+ void operator()() const { static_assert(!has_fetch_or<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestFetchOr {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_or(T(2));
+ assert(y == T(1));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.fetch_or(T(0)));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_or(T(2), std::memory_order_relaxed);
+ assert(y == T(3));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.fetch_or(T(0), std::memory_order_relaxed));
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestFetchOr>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveFetchOr>()();
+
+ TestEachPointerType<TestDoesNotHaveFetchOr>()();
+
+ TestDoesNotHaveFetchOr<bool>()();
+ TestDoesNotHaveFetchOr<UserAtomicType>()();
+ TestDoesNotHaveFetchOr<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp
new file mode 100644
index 000000000000..545604530ada
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_sub.pass.cpp
@@ -0,0 +1,113 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type fetch_sub(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+// floating-point-type fetch_sub(floating-point-type, memory_order = memory_order::seq_cst) const noexcept;
+// T* fetch_sub(difference_type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_sub = requires {
+ std::declval<T const>().fetch_sub(std::declval<T>());
+ std::declval<T const>().fetch_sub(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+template <typename T>
+struct TestDoesNotHaveFetchSub {
+ void operator()() const { static_assert(!has_fetch_sub<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestFetchSub {
+ void operator()() const {
+ if constexpr (std::is_arithmetic_v<T>) {
+ T x(T(7));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_sub(T(4));
+ assert(y == T(7));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.fetch_sub(T(0)));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_sub(T(2), std::memory_order_relaxed);
+ assert(y == T(3));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(a.fetch_sub(T(0), std::memory_order_relaxed));
+ }
+ } else if constexpr (std::is_pointer_v<T>) {
+ using U = std::remove_pointer_t<T>;
+ U t[9] = {};
+ T p{&t[7]};
+ std::atomic_ref<T> const a(p);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_sub(4);
+ assert(y == &t[7]);
+ assert(a == &t[3]);
+ ASSERT_NOEXCEPT(a.fetch_sub(0));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_sub(2, std::memory_order_relaxed);
+ assert(y == &t[3]);
+ assert(a == &t[1]);
+ ASSERT_NOEXCEPT(a.fetch_sub(0, std::memory_order_relaxed));
+ }
+ } else {
+ static_assert(std::is_void_v<T>);
+ }
+
+ // memory_order::release
+ {
+ auto fetch_sub = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ x.fetch_sub(old_val - new_val, std::memory_order::release);
+ };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(std::memory_order::acquire); };
+ test_acquire_release<T>(fetch_sub, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto fetch_sub_no_arg = [](std::atomic_ref<T> const& x, T old_val, T new_val) { x.fetch_sub(old_val - new_val); };
+ auto fetch_sub_with_order = [](std::atomic_ref<T> const& x, T old_val, T new_val) {
+ x.fetch_sub(old_val - new_val, std::memory_order::seq_cst);
+ };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(); };
+ test_seq_cst<T>(fetch_sub_no_arg, load);
+ test_seq_cst<T>(fetch_sub_with_order, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestFetchSub>()();
+
+ TestFetchSub<float>()();
+ TestFetchSub<double>()();
+
+ TestEachPointerType<TestFetchSub>()();
+
+ TestDoesNotHaveFetchSub<bool>()();
+ TestDoesNotHaveFetchSub<UserAtomicType>()();
+ TestDoesNotHaveFetchSub<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp b/libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp
new file mode 100644
index 000000000000..aade87f961f1
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/fetch_xor.pass.cpp
@@ -0,0 +1,68 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type fetch_xor(integral-type, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <concepts>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_fetch_xor = requires {
+ std::declval<T const>().fetch_xor(std::declval<T>());
+ std::declval<T const>().fetch_xor(std::declval<T>(), std::declval<std::memory_order>());
+};
+
+template <typename T>
+struct TestDoesNotHaveFetchXor {
+ void operator()() const { static_assert(!has_fetch_xor<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestFetchXor {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_xor(T(2));
+ assert(y == T(1));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.fetch_xor(T(0)));
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.fetch_xor(T(2), std::memory_order_relaxed);
+ assert(y == T(3));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(a.fetch_xor(T(0), std::memory_order_relaxed));
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestFetchXor>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveFetchXor>()();
+
+ TestEachPointerType<TestDoesNotHaveFetchXor>()();
+
+ TestDoesNotHaveFetchXor<bool>()();
+ TestDoesNotHaveFetchXor<UserAtomicType>()();
+ TestDoesNotHaveFetchXor<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp b/libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp
new file mode 100644
index 000000000000..c84c89b4d2b4
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/increment_decrement.pass.cpp
@@ -0,0 +1,97 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator++(int) const noexcept;
+// integral-type operator--(int) const noexcept;
+// integral-type operator++() const noexcept;
+// integral-type operator--() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_pre_increment_operator = requires { ++std::declval<T const>(); };
+
+template <typename T>
+concept has_post_increment_operator = requires { std::declval<T const>()++; };
+
+template <typename T>
+concept has_pre_decrement_operator = requires { --std::declval<T const>(); };
+
+template <typename T>
+concept has_post_decrement_operator = requires { std::declval<T const>()--; };
+
+template <typename T>
+constexpr bool does_not_have_increment_nor_decrement_operators() {
+ return !has_pre_increment_operator<T> && !has_pre_decrement_operator<T> && !has_post_increment_operator<T> &&
+ !has_post_decrement_operator<T>;
+}
+
+template <typename T>
+struct TestDoesNotHaveIncrementDecrement {
+ void operator()() const { static_assert(does_not_have_increment_nor_decrement_operators<T>()); }
+};
+
+template <typename T>
+struct TestIncrementDecrement {
+ void operator()() const {
+ static_assert(std::is_integral_v<T>);
+
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = ++a;
+ assert(y == T(2));
+ assert(x == T(2));
+ ASSERT_NOEXCEPT(++a);
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = --a;
+ assert(y == T(1));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(--a);
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a++;
+ assert(y == T(1));
+ assert(x == T(2));
+ ASSERT_NOEXCEPT(a++);
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a--;
+ assert(y == T(2));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(a--);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestIncrementDecrement>()();
+
+ TestEachFloatingPointType<TestDoesNotHaveIncrementDecrement>()();
+
+ TestEachPointerType<TestDoesNotHaveIncrementDecrement>()();
+
+ TestDoesNotHaveIncrementDecrement<bool>()();
+ TestDoesNotHaveIncrementDecrement<UserAtomicType>()();
+ TestDoesNotHaveIncrementDecrement<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp b/libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp
new file mode 100644
index 000000000000..94f65e3b4b66
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/is_always_lock_free.pass.cpp
@@ -0,0 +1,71 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// static constexpr bool is_always_lock_free;
+// bool is_lock_free() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+
+#include "test_macros.h"
+
+template <typename T>
+void check_always_lock_free(std::atomic_ref<T> const a) {
+ std::same_as<const bool> decltype(auto) is_always_lock_free = std::atomic_ref<T>::is_always_lock_free;
+ if (is_always_lock_free) {
+ std::same_as<bool> decltype(auto) is_lock_free = a.is_lock_free();
+ assert(is_lock_free);
+ }
+ ASSERT_NOEXCEPT(a.is_lock_free());
+}
+
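+// Helper macro: the type is defined inline in the macro argument so that
+// layouts of various sizes and alignments can be checked without naming them
+// at namespace scope.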
+#define CHECK_ALWAYS_LOCK_FREE(T) \
+ do { \
+ typedef T type; \
+ type obj{}; \
+ check_always_lock_free(std::atomic_ref<type>(obj)); \
+ } while (0)
+
+void test() {
+ int i = 0;
+ check_always_lock_free(std::atomic_ref<int>(i));
+
+ float f = 0.f;
+ check_always_lock_free(std::atomic_ref<float>(f));
+
+ int* p = &i;
+ check_always_lock_free(std::atomic_ref<int*>(p));
+
+ CHECK_ALWAYS_LOCK_FREE(struct Empty{});
+ CHECK_ALWAYS_LOCK_FREE(struct OneInt { int i; });
+ CHECK_ALWAYS_LOCK_FREE(struct IntArr2 { int i[2]; });
+ CHECK_ALWAYS_LOCK_FREE(struct FloatArr3 { float i[3]; });
+ CHECK_ALWAYS_LOCK_FREE(struct LLIArr2 { long long int i[2]; });
+ CHECK_ALWAYS_LOCK_FREE(struct LLIArr4 { long long int i[4]; });
+ CHECK_ALWAYS_LOCK_FREE(struct LLIArr8 { long long int i[8]; });
+ CHECK_ALWAYS_LOCK_FREE(struct LLIArr16 { long long int i[16]; });
+ CHECK_ALWAYS_LOCK_FREE(struct Padding {
+ char c; /* padding */
+ long long int i;
+ });
+ CHECK_ALWAYS_LOCK_FREE(union IntFloat {
+ int i;
+ float f;
+ });
+ CHECK_ALWAYS_LOCK_FREE(enum class CharEnumClass : char{foo});
+}
+
+int main(int, char**) {
+ test();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/load.pass.cpp b/libcxx/test/std/atomics/atomics.ref/load.pass.cpp
new file mode 100644
index 000000000000..feed0fbaed84
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/load.pass.cpp
@@ -0,0 +1,62 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// T load(memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <concepts>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestLoad {
+ void operator()() const {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ {
+ std::same_as<T> decltype(auto) y = a.load();
+ assert(y == T(1));
+ ASSERT_NOEXCEPT(a.load());
+ }
+
+ {
+ std::same_as<T> decltype(auto) y = a.load(std::memory_order_seq_cst);
+ assert(y == T(1));
+ ASSERT_NOEXCEPT(a.load(std::memory_order_seq_cst));
+ }
+
+ // memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& y, T, T new_val) { y.store(new_val); };
+ auto load_no_arg = [](std::atomic_ref<T> const& y) { return y.load(); };
+ auto load_with_order = [](std::atomic_ref<T> const& y) { return y.load(std::memory_order::seq_cst); };
+ test_seq_cst<T>(store, load_no_arg);
+ test_seq_cst<T>(store, load_with_order);
+ }
+
+ // memory_order::release
+ {
+ auto store = [](std::atomic_ref<T> const& y, T, T new_val) { y.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& y) { return y.load(std::memory_order::acquire); };
+ test_acquire_release<T>(store, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestLoad>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp b/libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp
new file mode 100644
index 000000000000..d4e2f0126d62
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/member_types.pass.cpp
@@ -0,0 +1,132 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// UNSUPPORTED: c++03, c++11, c++14, c++17, c++20
+
+// <atomic>
+
+// template <class T>
+// struct atomic_ref
+// {
+// using value_type = T;
+// using difference_type = value_type; // only for atomic_ref<Integral> and
+// // atomic_ref<Floating> specializations
+// using difference_type = std::ptrdiff_t; // only for atomic_ref<T*> specializations
+//
+// explicit atomic_ref(T&);
+// atomic_ref(const atomic_ref&) noexcept;
+// atomic_ref& operator=(const atomic_ref&) = delete;
+// };
+
+#include <atomic>
+#include <type_traits>
+
+#include "test_macros.h"
+
+template <class T>
+concept has_difference_type = requires { typename T::difference_type; };
+
+template <class T>
+void check_member_types() {
+ if constexpr ((std::is_integral_v<T> && !std::is_same_v<T, bool>) || std::is_floating_point_v<T>) {
+ ASSERT_SAME_TYPE(typename std::atomic_ref<T>::value_type, T);
+ ASSERT_SAME_TYPE(typename std::atomic_ref<T>::difference_type, T);
+ } else if constexpr (std::is_pointer_v<T>) {
+ ASSERT_SAME_TYPE(typename std::atomic_ref<T>::value_type, T);
+ ASSERT_SAME_TYPE(typename std::atomic_ref<T>::difference_type, std::ptrdiff_t);
+ } else {
+ ASSERT_SAME_TYPE(typename std::atomic_ref<T>::value_type, T);
+ static_assert(!has_difference_type<std::atomic_ref<T>>);
+ }
+}
+
+template <class T>
+void test() {
+ // value_type and difference_type (except for primary template)
+ check_member_types<T>();
+
+ static_assert(std::is_nothrow_copy_constructible_v<std::atomic_ref<T>>);
+
+ static_assert(!std::is_copy_assignable_v<std::atomic_ref<T>>);
+
+ // explicit constructor
+ static_assert(!std::is_convertible_v<T, std::atomic_ref<T>>);
+ static_assert(std::is_constructible_v<std::atomic_ref<T>, T&>);
+}
+
+void testall() {
+ // Primary template
+ struct Empty {};
+ test<Empty>();
+ struct Trivial {
+ int a;
+ float b;
+ };
+ test<Trivial>();
+ test<bool>();
+
+ // Partial specialization for pointer types
+ test<void*>();
+
+ // Specialization for integral types
+ // + character types
+ test<char>();
+ test<char8_t>();
+ test<char16_t>();
+ test<char32_t>();
+ test<wchar_t>();
+ // + standard signed integer types
+ test<signed char>();
+ test<short>();
+ test<int>();
+ test<long>();
+ test<long long>();
+ // + standard unsigned integer types
+ test<unsigned char>();
+ test<unsigned short>();
+ test<unsigned int>();
+ test<unsigned long>();
+ test<unsigned long long>();
+ // + any other types needed by the typedefs in the header <cstdint>
+ test<int8_t>();
+ test<int16_t>();
+ test<int32_t>();
+ test<int64_t>();
+ test<int_fast8_t>();
+ test<int_fast16_t>();
+ test<int_fast32_t>();
+ test<int_fast64_t>();
+ test<int_least8_t>();
+ test<int_least16_t>();
+ test<int_least32_t>();
+ test<int_least64_t>();
+ test<intmax_t>();
+ test<intptr_t>();
+ test<uint8_t>();
+ test<uint16_t>();
+ test<uint32_t>();
+ test<uint64_t>();
+ test<uint_fast8_t>();
+ test<uint_fast16_t>();
+ test<uint_fast32_t>();
+ test<uint_fast64_t>();
+ test<uint_least8_t>();
+ test<uint_least16_t>();
+ test<uint_least32_t>();
+ test<uint_least64_t>();
+ test<uintmax_t>();
+ test<uintptr_t>();
+
+ // Specialization for floating-point types
+ // + floating-point types
+ test<float>();
+ test<double>();
+ test<long double>();
+ // + TODO extended floating-point types
+}
+
+int main(int, char**) { testall(); return 0; }
diff --git a/libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp b/libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp
new file mode 100644
index 000000000000..382b19f8c1d7
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/notify_all.pass.cpp
@@ -0,0 +1,78 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: no-threads
+// XFAIL: availability-synchronization_library-missing
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// void notify_all() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <thread>
+#include <type_traits>
+#include <vector>
+
+#include "atomic_helpers.h"
+#include "make_test_thread.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestNotifyAll {
+ void operator()() const {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ bool done = false;
+ std::atomic<int> started_num = 0;
+ std::atomic<int> wait_done_num = 0;
+
+ constexpr auto number_of_threads = 8;
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+
+ for (auto j = 0; j < number_of_threads; ++j) {
+ threads.push_back(support::make_test_thread([&a, &started_num, &done, &wait_done_num] {
+ started_num.fetch_add(1, std::memory_order::relaxed);
+
+ a.wait(T(1));
+ wait_done_num.fetch_add(1, std::memory_order::relaxed);
+
+ // likely to fail if wait did not block
+ assert(done);
+ }));
+ }
+
+ while (started_num.load(std::memory_order::relaxed) != number_of_threads) {
+ std::this_thread::yield();
+ }
+
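+    // give the started threads a chance to block inside wait() before notifying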
+ std::this_thread::sleep_for(std::chrono::milliseconds(1));
+
+ done = true;
+ a.store(T(3));
+ a.notify_all();
+
+    // notify_all should unblock all the threads so that the loop below won't get stuck
+ while (wait_done_num.load(std::memory_order::relaxed) != number_of_threads) {
+ std::this_thread::yield();
+ }
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+
+ ASSERT_NOEXCEPT(a.notify_all());
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestNotifyAll>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp b/libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp
new file mode 100644
index 000000000000..611e67417e4d
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/notify_one.pass.cpp
@@ -0,0 +1,46 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: no-threads
+// XFAIL: availability-synchronization_library-missing
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// void notify_one() const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <thread>
+#include <type_traits>
+#include <vector>
+
+#include "atomic_helpers.h"
+#include "make_test_thread.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestNotifyOne {
+ void operator()() const {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::thread t = support::make_test_thread([&]() {
+ a.store(T(3));
+ a.notify_one();
+ });
+ a.wait(T(1));
+ assert(a.load() == T(3));
+ t.join();
+ ASSERT_NOEXCEPT(a.notify_one());
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestNotifyOne>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp b/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp
new file mode 100644
index 000000000000..571d626035fa
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/operator_minus_equals.pass.cpp
@@ -0,0 +1,79 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator-=(integral-type) const noexcept;
+// floating-point-type operator-=(floating-point-type) const noexcept;
+// T* operator-=(difference_type) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_operator_minus_equals = requires { std::declval<T const>() -= std::declval<T>(); };
+
+template <typename T>
+struct TestDoesNotHaveOperatorMinusEquals {
+ void operator()() const { static_assert(!has_operator_minus_equals<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestOperatorMinusEquals {
+ void operator()() const {
+ if constexpr (std::is_arithmetic_v<T>) {
+ T x(T(3));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a -= T(2));
+ assert(y == T(1));
+ assert(x == T(1));
+ ASSERT_NOEXCEPT(a -= T(0));
+ } else if constexpr (std::is_pointer_v<T>) {
+ using U = std::remove_pointer_t<T>;
+ U t[9] = {};
+ T p{&t[3]};
+ std::atomic_ref<T> const a(p);
+
+ std::same_as<T> decltype(auto) y = (a -= 2);
+ assert(y == &t[1]);
+ assert(a == &t[1]);
+ ASSERT_NOEXCEPT(a -= 0);
+ } else {
+ static_assert(std::is_void_v<T>);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto minus_equals = [](std::atomic_ref<T> const& x, T old_val, T new_val) { x -= (old_val - new_val); };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(); };
+ test_seq_cst<T>(minus_equals, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestOperatorMinusEquals>()();
+
+ TestOperatorMinusEquals<float>()();
+ TestOperatorMinusEquals<double>()();
+
+ TestEachPointerType<TestOperatorMinusEquals>()();
+
+ TestDoesNotHaveOperatorMinusEquals<bool>()();
+ TestDoesNotHaveOperatorMinusEquals<UserAtomicType>()();
+ TestDoesNotHaveOperatorMinusEquals<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp b/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp
new file mode 100644
index 000000000000..de48ea56f57f
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/operator_plus_equals.pass.cpp
@@ -0,0 +1,79 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+
+// integral-type operator+=(integral-type) const noexcept;
+// floating-point-type operator+=(floating-point-type) const noexcept;
+// T* operator+=(difference_type) const noexcept;
+
+#include <atomic>
+#include <concepts>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+concept has_operator_plus_equals = requires { std::declval<T const>() += std::declval<T>(); };
+
+template <typename T>
+struct TestDoesNotHaveOperatorPlusEquals {
+ void operator()() const { static_assert(!has_operator_plus_equals<std::atomic_ref<T>>); }
+};
+
+template <typename T>
+struct TestOperatorPlusEquals {
+ void operator()() const {
+ if constexpr (std::is_arithmetic_v<T>) {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ std::same_as<T> decltype(auto) y = (a += T(2));
+ assert(y == T(3));
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a += T(0));
+ } else if constexpr (std::is_pointer_v<T>) {
+ using U = std::remove_pointer_t<T>;
+ U t[9] = {};
+ T p{&t[1]};
+ std::atomic_ref<T> const a(p);
+
+ std::same_as<T> decltype(auto) y = (a += 2);
+ assert(y == &t[3]);
+ assert(a == &t[3]);
+ ASSERT_NOEXCEPT(a += 0);
+ } else {
+ static_assert(std::is_void_v<T>);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto plus_equals = [](std::atomic_ref<T> const& x, T old_val, T new_val) { x += (new_val - old_val); };
+ auto load = [](std::atomic_ref<T> const& x) { return x.load(); };
+ test_seq_cst<T>(plus_equals, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachIntegralType<TestOperatorPlusEquals>()();
+
+ TestOperatorPlusEquals<float>()();
+ TestOperatorPlusEquals<double>()();
+
+ TestEachPointerType<TestOperatorPlusEquals>()();
+
+ TestDoesNotHaveOperatorPlusEquals<bool>()();
+ TestDoesNotHaveOperatorPlusEquals<UserAtomicType>()();
+ TestDoesNotHaveOperatorPlusEquals<LargeUserAtomicType>()();
+
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp b/libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp
new file mode 100644
index 000000000000..86e0cba4dbf0
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/required_alignment.pass.cpp
@@ -0,0 +1,39 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// static constexpr size_t required_alignment;
+
+#include <atomic>
+#include <cassert>
+#include <concepts>
+
+template <typename T>
+constexpr void check_required_alignment() {
+ std::same_as<const std::size_t> decltype(auto) required_alignment = std::atomic_ref<T>::required_alignment;
+ assert(required_alignment >= alignof(T));
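+  // required_alignment may be stricter than alignof(T), e.g. to allow lock-free operations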
+}
+
+constexpr bool test() {
+ check_required_alignment<int>();
+ check_required_alignment<float>();
+ check_required_alignment<int*>();
+ struct Empty {};
+ check_required_alignment<Empty>();
+ struct Trivial {
+ int a;
+ };
+ check_required_alignment<Trivial>();
+ return true;
+}
+
+int main(int, char**) {
+ test();
+ static_assert(test());
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/requires-trivially-copyable.verify.cpp b/libcxx/test/std/atomics/atomics.ref/requires-trivially-copyable.verify.cpp
new file mode 100644
index 000000000000..9a8b036ffd1f
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/requires-trivially-copyable.verify.cpp
@@ -0,0 +1,26 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+
+// <atomic>
+
+// template<class T>
+// class atomic_ref;
+
+// The program is ill-formed if is_trivially_copyable_v<T> is false.
+
+#include <atomic>
+
+void trivially_copyable() {
+ struct X {
+ X() = default;
+ X(X const&) {} // -> not trivially copyable
+ } x;
+ // expected-error-re@*:* {{static assertion failed {{.*}}atomic_ref<T> requires that 'T' be a trivially copyable type}}
+ std::atomic_ref<X> r(x);
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/store.pass.cpp b/libcxx/test/std/atomics/atomics.ref/store.pass.cpp
new file mode 100644
index 000000000000..ea01a3d02a34
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/store.pass.cpp
@@ -0,0 +1,61 @@
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// void store(T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestStore {
+ void operator()() const {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ a.store(T(2));
+ assert(x == T(2));
+ ASSERT_NOEXCEPT(a.store(T(1)));
+
+ a.store(T(3), std::memory_order_seq_cst);
+ assert(x == T(3));
+ ASSERT_NOEXCEPT(a.store(T(0), std::memory_order_seq_cst));
+
+ // TODO memory_order::relaxed
+
+ // memory_order::seq_cst
+ {
+ auto store_no_arg = [](std::atomic_ref<T> const& y, T, T new_val) { y.store(new_val); };
+ auto store_with_order = [](std::atomic_ref<T> const& y, T, T new_val) {
+ y.store(new_val, std::memory_order::seq_cst);
+ };
+ auto load = [](std::atomic_ref<T> const& y) { return y.load(); };
+ test_seq_cst<T>(store_no_arg, load);
+ test_seq_cst<T>(store_with_order, load);
+ }
+
+ // memory_order::release
+ {
+ auto store = [](std::atomic_ref<T> const& y, T, T new_val) { y.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& y) { return y.load(std::memory_order::acquire); };
+ test_acquire_release<T>(store, load);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestStore>()();
+ return 0;
+}
diff --git a/libcxx/test/std/atomics/atomics.ref/test_helper.h b/libcxx/test/std/atomics/atomics.ref/test_helper.h
new file mode 100644
index 000000000000..225a70c5a16c
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/test_helper.h
@@ -0,0 +1,136 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H
+#define TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H
+
+#include <atomic>
+#include <cassert>
+#include <cmath>
+#include <vector>
+
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include "make_test_thread.h"
+# include <thread>
+#endif
+
+template <class T>
+bool equals(T x, T y) {
+ return x == y;
+}
+
+template <class T>
+T make_value(int i) {
+ assert(i == 0 || i == 1);
+ if constexpr (std::is_pointer_v<T>) {
+ // So that pointers returned can be subtracted from one another
+ static std::remove_const_t<std::remove_pointer_t<T>> d[2];
+ return &d[i];
+ } else {
+ return T(i);
+ }
+}
+
+// Test that all threads observe the two stores in the same order.
+// The test is guaranteed to pass when store_op and load_op access
+// the memory with seq_cst ordering.
+template <class T, class StoreOp, class LoadOp>
+void test_seq_cst(StoreOp store_op, LoadOp load_op) {
+#ifndef TEST_HAS_NO_THREADS
+ for (int i = 0; i < 100; ++i) {
+ T old_value(make_value<T>(0));
+ T new_value(make_value<T>(1));
+
+ T copy_x = old_value;
+ std::atomic_ref<T> const x(copy_x);
+ T copy_y = old_value;
+ std::atomic_ref<T> const y(copy_y);
+
+ std::atomic_bool x_updated_first(false);
+ std::atomic_bool y_updated_first(false);
+
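+    // IRIW-style check: two independent writers (t1, t2) and two readers
+    // (t3, t4) that poll x and y in opposite orders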
+ auto t1 = support::make_test_thread([&] { store_op(x, old_value, new_value); });
+
+ auto t2 = support::make_test_thread([&] { store_op(y, old_value, new_value); });
+
+ auto t3 = support::make_test_thread([&] {
+ while (!equals(load_op(x), new_value)) {
+ std::this_thread::yield();
+ }
+ if (!equals(load_op(y), new_value)) {
+ x_updated_first.store(true, std::memory_order_relaxed);
+ }
+ });
+
+ auto t4 = support::make_test_thread([&] {
+ while (!equals(load_op(y), new_value)) {
+ std::this_thread::yield();
+ }
+ if (!equals(load_op(x), new_value)) {
+ y_updated_first.store(true, std::memory_order_relaxed);
+ }
+ });
+
+ t1.join();
+ t2.join();
+ t3.join();
+ t4.join();
+ // thread 3 and thread 4 cannot see different orders of storing x and y
+ assert(!(x_updated_first && y_updated_first));
+ }
+#else
+ (void)store_op;
+ (void)load_op;
+#endif
+}
+
+// Test that all writes made before the store are visible to other threads after the load.
+// The test is guaranteed to pass when store_op and load_op access
+// the memory with acquire-release ordering.
+template <class T, class StoreOp, class LoadOp>
+void test_acquire_release(StoreOp store_op, LoadOp load_op) {
+#ifndef TEST_HAS_NO_THREADS
+ for (auto i = 0; i < 100; ++i) {
+ T old_value(make_value<T>(0));
+ T new_value(make_value<T>(1));
+
+ T copy = old_value;
+ std::atomic_ref<T> const at(copy);
+ int non_atomic = 5;
+
+ constexpr auto number_of_threads = 8;
+ std::vector<std::thread> threads;
+ threads.reserve(number_of_threads);
+
+ for (auto j = 0; j < number_of_threads; ++j) {
+ threads.push_back(support::make_test_thread([&at, &non_atomic, load_op, new_value] {
+ while (!equals(load_op(at), new_value)) {
+ std::this_thread::yield();
+ }
+ // Other thread's writes before the release store are visible
+ // in this thread's read after the acquire load
+ assert(non_atomic == 6);
+ }));
+ }
+
+ non_atomic = 6;
+ store_op(at, old_value, new_value);
+
+ for (auto& thread : threads) {
+ thread.join();
+ }
+ }
+#else
+ (void)store_op;
+ (void)load_op;
+#endif
+}
+
+#endif // TEST_STD_ATOMICS_ATOMIC_REF_TEST_HELPER_H
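The helpers above are driven by passing store/load callables over std::atomic_ref. As a minimal sketch (the wrapper function name is illustrative), a test can exercise both helpers with the plain seq_cst overloads, mirroring the pattern used by the new .pass.cpp tests:

#include <atomic>

#include "test_helper.h"

// Sketch only: drives both helpers with the default (seq_cst) store/load,
// the same shape of callables the new atomics.ref tests pass in.
template <class T>
void sanity_check_helpers() {
  auto store = [](std::atomic_ref<T> const& x, T /*old*/, T new_val) { x.store(new_val); };
  auto load  = [](std::atomic_ref<T> const& x) { return x.load(); };
  test_seq_cst<T>(store, load);         // no two threads may observe x and y updated in opposite orders
  test_acquire_release<T>(store, load); // writes before the store are visible after the load
}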
diff --git a/libcxx/test/std/atomics/atomics.ref/wait.pass.cpp b/libcxx/test/std/atomics/atomics.ref/wait.pass.cpp
new file mode 100644
index 000000000000..e5310febf5c5
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.ref/wait.pass.cpp
@@ -0,0 +1,88 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// UNSUPPORTED: no-threads
+// XFAIL: availability-synchronization_library-missing
+// XFAIL: !has-64-bit-atomics
+// XFAIL: !has-1024-bit-atomics
+
+// void wait(T, memory_order = memory_order::seq_cst) const noexcept;
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+#include "atomic_helpers.h"
+#include "make_test_thread.h"
+#include "test_helper.h"
+#include "test_macros.h"
+
+template <typename T>
+struct TestWait {
+ void operator()() const {
+ {
+ T x(T(1));
+ std::atomic_ref<T> const a(x);
+
+ assert(a.load() == T(1));
+ a.wait(T(0));
+ std::thread t1 = support::make_test_thread([&]() {
+ a.store(T(3));
+ a.notify_one();
+ });
+ a.wait(T(1));
+ assert(a.load() == T(3));
+ t1.join();
+ ASSERT_NOEXCEPT(a.wait(T(0)));
+
+ assert(a.load() == T(3));
+ a.wait(T(0), std::memory_order_seq_cst);
+ std::thread t2 = support::make_test_thread([&]() {
+ a.store(T(5));
+ a.notify_one();
+ });
+ a.wait(T(3), std::memory_order_seq_cst);
+ assert(a.load() == T(5));
+ t2.join();
+ ASSERT_NOEXCEPT(a.wait(T(0), std::memory_order_seq_cst));
+ }
+
+ // memory_order::acquire
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val, std::memory_order::release); };
+ auto load = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ x.wait(T(255), std::memory_order::acquire);
+ return result;
+ };
+ test_acquire_release<T>(store, load);
+ }
+
+ // memory_order::seq_cst
+ {
+ auto store = [](std::atomic_ref<T> const& x, T, T new_val) { x.store(new_val); };
+ auto load_no_arg = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ x.wait(T(255));
+ return result;
+ };
+ auto load_with_order = [](std::atomic_ref<T> const& x) {
+ auto result = x.load(std::memory_order::relaxed);
+ x.wait(T(255), std::memory_order::seq_cst);
+ return result;
+ };
+ test_seq_cst<T>(store, load_no_arg);
+ test_seq_cst<T>(store, load_with_order);
+ }
+ }
+};
+
+int main(int, char**) {
+ TestEachAtomicType<TestWait>()();
+ return 0;
+}
diff --git a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp
index c802ab787682..fbd1c7c5715e 100644
--- a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_double.pass.cpp
@@ -6,6 +6,11 @@
//
//===----------------------------------------------------------------------===//
+// The fix for LWG2381 (https://github.com/llvm/llvm-project/pull/77948) changed
+// the behavior of FP parsing, while Apple back-deployment targets remain broken
+// because the relevant code lives in the dylib.
+// UNSUPPORTED: using-built-library-before-llvm-19
+
// <locale>
// class num_get<charT, InputIterator>
@@ -116,9 +121,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
const char str[] = "INF";
@@ -128,9 +133,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
const char str[] = "-inf";
@@ -140,9 +145,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
const char str[] = "-INF";
@@ -152,9 +157,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
const char str[] = "nan";
@@ -164,9 +169,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
const char str[] = "NAN";
@@ -176,9 +181,129 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "+p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "+P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "-p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "-P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "+e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "+E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "-e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
+ }
+ {
+ const char str[] = "-E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0);
}
{
v = -1;
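In short, the updated assertions above encode the post-LWG2381 rule that "inf"/"nan" spellings and bare exponent markers are no longer accepted by num_get: extraction fails, failbit is set, and the value is zeroed. A condensed, hedged illustration of the same behavior through an istream (illustrative only, not part of the test suite):

#include <cassert>
#include <sstream>

int main() {
  std::istringstream in("inf");
  double v = -1.0;
  in >> v;            // with the LWG2381 fix, "inf" is not a valid double
  assert(in.fail());  // failbit is set, matching err == ios.failbit above
  assert(v == 0.0);   // on failure the value is zeroed (C++11 extraction rules)
  return 0;
}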
diff --git a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp
index 79c8480d0699..b5ac7d876157 100644
--- a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_float.pass.cpp
@@ -6,6 +6,11 @@
//
//===----------------------------------------------------------------------===//
+// The fix for LWG2381 (https://github.com/llvm/llvm-project/pull/77948) changed
+// the behavior of FP parsing, while Apple back-deployment targets remain broken
+// because the relevant code lives in the dylib.
+// UNSUPPORTED: using-built-library-before-llvm-19
+
// <locale>
// class num_get<charT, InputIterator>
@@ -105,9 +110,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
const char str[] = "INF";
@@ -117,9 +122,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
const char str[] = "-inf";
@@ -129,9 +134,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
const char str[] = "-INF";
@@ -141,9 +146,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
const char str[] = "nan";
@@ -153,9 +158,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
const char str[] = "NAN";
@@ -165,9 +170,129 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "+p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "+P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "-p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "-P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "+e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "+E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "-e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
+ }
+ {
+ const char str[] = "-E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0f);
}
{
v = -1;
diff --git a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp
index e2b2aeafd1ef..9617899f749c 100644
--- a/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp
+++ b/libcxx/test/std/localization/locale.categories/category.numeric/locale.num.get/facet.num.get.members/get_long_double.pass.cpp
@@ -6,6 +6,11 @@
//
//===----------------------------------------------------------------------===//
+// The fix for LWG2381 (https://github.com/llvm/llvm-project/pull/77948) changed
+// the behavior of FP parsing, while Apple back-deployment targets remain broken
+// because the relevant code lives in the dylib.
+// UNSUPPORTED: using-built-library-before-llvm-19
+
// <locale>
// class num_get<charT, InputIterator>
@@ -105,9 +110,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "INF";
@@ -117,9 +122,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == INFINITY);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "-inf";
@@ -129,9 +134,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "-INF";
@@ -141,9 +146,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(v == -INFINITY);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "nan";
@@ -153,9 +158,9 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "NAN";
@@ -165,9 +170,129 @@ int main(int, char**)
f.get(cpp17_input_iterator<const char*>(str),
cpp17_input_iterator<const char*>(str+sizeof(str)),
ios, err, v);
- assert(base(iter) == str+sizeof(str)-1);
- assert(err == ios.goodbit);
- assert(std::isnan(v));
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "+p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "+P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "-p00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "-P00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "+e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "+E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "-e00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
+ }
+ {
+ const char str[] = "-E00";
+ std::hex(ios);
+ std::ios_base::iostate err = ios.goodbit;
+ cpp17_input_iterator<const char*> iter = f.get(
+ cpp17_input_iterator<const char*>(str), cpp17_input_iterator<const char*>(str + sizeof(str)), ios, err, v);
+ assert(base(iter) == str + 1);
+ assert(err == ios.failbit);
+ assert(v == 0.0l);
}
{
const char str[] = "1.189731495357231765021264e+49321";
diff --git a/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/assign.pass.cpp b/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/assign.pass.cpp
index 63a1a8adf4e3..d71b76926ce3 100644
--- a/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/assign.pass.cpp
+++ b/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/assign.pass.cpp
@@ -84,12 +84,12 @@ int main(int, char**)
test_ext<unsigned int>();
test<unsigned long>();
// This isn't implemented on platforms without __int128
-#ifndef _LIBCPP_HAS_NO_INT128
+#ifndef TEST_HAS_NO_INT128
test_ext<unsigned long>();
#endif
test<unsigned long long>();
// This isn't implemented on platforms without __int128
-#ifndef _LIBCPP_HAS_NO_INT128
+#ifndef TEST_HAS_NO_INT128
test_ext<unsigned long long>();
#endif
diff --git a/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/copy.pass.cpp b/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/copy.pass.cpp
index c45f45d0f20a..50389ef80139 100644
--- a/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/copy.pass.cpp
+++ b/libcxx/test/std/numerics/rand/rand.eng/rand.eng.lcong/copy.pass.cpp
@@ -83,12 +83,12 @@ int main(int, char**)
test_ext<unsigned int>();
test<unsigned long>();
// This isn't implemented on platforms without __int128
-#ifndef _LIBCPP_HAS_NO_INT128
+#ifndef TEST_HAS_NO_INT128
test_ext<unsigned long>();
#endif
test<unsigned long long>();
// This isn't implemented on platforms without __int128
-#ifndef _LIBCPP_HAS_NO_INT128
+#ifndef TEST_HAS_NO_INT128
test_ext<unsigned long long>();
#endif
diff --git a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.cons/mutex.pass.cpp b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.cons/mutex.pass.cpp
index 4940041bcf96..ece330134f2c 100644
--- a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.cons/mutex.pass.cpp
+++ b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.cons/mutex.pass.cpp
@@ -5,10 +5,9 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
+
// UNSUPPORTED: no-threads
// UNSUPPORTED: c++03, c++11
-// ALLOW_RETRIES: 2
// <shared_mutex>
@@ -19,9 +18,8 @@
// template<class _Mutex> shared_lock(shared_lock<_Mutex>)
// -> shared_lock<_Mutex>; // C++17
+#include <atomic>
#include <cassert>
-#include <chrono>
-#include <cstdlib>
#include <shared_mutex>
#include <thread>
#include <vector>
@@ -29,77 +27,77 @@
#include "make_test_thread.h"
#include "test_macros.h"
-typedef std::chrono::system_clock Clock;
-typedef Clock::time_point time_point;
-typedef Clock::duration duration;
-typedef std::chrono::milliseconds ms;
-typedef std::chrono::nanoseconds ns;
-
-ms WaitTime = ms(250);
-
-// Thread sanitizer causes more overhead and will sometimes cause this test
-// to fail. To prevent this we give Thread sanitizer more time to complete the
-// test.
-#if !defined(TEST_IS_EXECUTED_IN_A_SLOW_ENVIRONMENT)
-ms Tolerance = ms(50);
-#else
-ms Tolerance = ms(50 * 5);
-#endif
+struct Monitor {
+ bool lock_shared_called = false;
+ bool unlock_shared_called = false;
+};
-std::shared_timed_mutex m;
+struct TrackedMutex {
+ Monitor* monitor = nullptr;
-void f()
-{
- time_point t0 = Clock::now();
- time_point t1;
- {
- std::shared_lock<std::shared_timed_mutex> ul(m);
- t1 = Clock::now();
- }
- ns d = t1 - t0 - WaitTime;
- assert(d < Tolerance); // within tolerance
-}
+ void lock_shared() {
+ if (monitor != nullptr)
+ monitor->lock_shared_called = true;
+ }
+ void unlock_shared() {
+ if (monitor != nullptr)
+ monitor->unlock_shared_called = true;
+ }
+};
-void g()
-{
- time_point t0 = Clock::now();
- time_point t1;
- {
- std::shared_lock<std::shared_timed_mutex> ul(m);
- t1 = Clock::now();
- }
- ns d = t1 - t0;
- assert(d < Tolerance); // within tolerance
-}
+template <class Mutex>
+void test() {
+ // Basic sanity test
+ {
+ Mutex mutex;
+ std::vector<std::thread> threads;
+ std::atomic<bool> ready(false);
+ for (int i = 0; i != 5; ++i) {
+ threads.push_back(support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
-int main(int, char**)
-{
- std::vector<std::thread> v;
- {
- m.lock();
- for (int i = 0; i < 5; ++i)
- v.push_back(support::make_test_thread(f));
- std::this_thread::sleep_for(WaitTime);
- m.unlock();
- for (auto& t : v)
- t.join();
- }
- {
- m.lock_shared();
- for (auto& t : v)
- t = support::make_test_thread(g);
- std::thread q = support::make_test_thread(f);
- std::this_thread::sleep_for(WaitTime);
- m.unlock_shared();
- for (auto& t : v)
- t.join();
- q.join();
+ std::shared_lock<Mutex> lock(mutex);
+ assert(lock.owns_lock());
+ }));
}
+ ready = true;
+ for (auto& t : threads)
+ t.join();
+ }
+
+ // Test CTAD
+ {
+#if TEST_STD_VER >= 17
+ Mutex mutex;
+ std::shared_lock lock(mutex);
+ static_assert(std::is_same<decltype(lock), std::shared_lock<Mutex>>::value);
+#endif
+ }
+}
+
+int main(int, char**) {
#if TEST_STD_VER >= 17
- std::shared_lock sl(m);
- static_assert((std::is_same<decltype(sl), std::shared_lock<decltype(m)>>::value), "" );
+ test<std::shared_mutex>();
#endif
+ test<std::shared_timed_mutex>();
+ test<TrackedMutex>();
+
+ // Use shared_lock with a dummy mutex class that tracks whether each
+ // operation has been called or not.
+ {
+ Monitor monitor;
+ TrackedMutex mutex{&monitor};
+
+ std::shared_lock<TrackedMutex> lock(mutex);
+ assert(monitor.lock_shared_called);
+ assert(lock.owns_lock());
+
+ lock.unlock();
+ assert(monitor.unlock_shared_called);
+ }
return 0;
}
diff --git a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/lock.pass.cpp b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/lock.pass.cpp
index edb7c42356ac..d36ca1d38f8f 100644
--- a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/lock.pass.cpp
+++ b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/lock.pass.cpp
@@ -5,10 +5,9 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
+
// UNSUPPORTED: no-threads
// UNSUPPORTED: c++03, c++11
-// ALLOW_RETRIES: 2
// <shared_mutex>
@@ -16,10 +15,9 @@
// void lock();
+#include <atomic>
#include <cassert>
-#include <chrono>
-#include <cstdlib>
-#include <mutex>
+#include <mutex> // std::defer_lock
#include <shared_mutex>
#include <system_error>
#include <thread>
@@ -28,71 +26,99 @@
#include "make_test_thread.h"
#include "test_macros.h"
-std::shared_timed_mutex m;
+struct Monitor {
+ bool lock_shared_called = false;
+ bool unlock_shared_called = false;
+};
-typedef std::chrono::system_clock Clock;
-typedef Clock::time_point time_point;
-typedef Clock::duration duration;
-typedef std::chrono::milliseconds ms;
-typedef std::chrono::nanoseconds ns;
+struct TrackedMutex {
+ Monitor* monitor = nullptr;
-ms WaitTime = ms(250);
+ void lock_shared() {
+ if (monitor != nullptr)
+ monitor->lock_shared_called = true;
+ }
+ void unlock_shared() {
+ if (monitor != nullptr)
+ monitor->unlock_shared_called = true;
+ }
+};
-// Thread sanitizer causes more overhead and will sometimes cause this test
-// to fail. To prevent this we give Thread sanitizer more time to complete the
-// test.
-#if !defined(TEST_IS_EXECUTED_IN_A_SLOW_ENVIRONMENT)
-ms Tolerance = ms(25);
-#else
-ms Tolerance = ms(25 * 5);
-#endif
+template <class Mutex>
+void test() {
+ // Basic sanity test
+ {
+ Mutex mutex;
+ std::vector<std::thread> threads;
+ std::atomic<bool> ready(false);
+ for (int i = 0; i != 5; ++i) {
+ threads.push_back(support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
+ std::shared_lock<Mutex> lock(mutex, std::defer_lock);
+ lock.lock();
+ assert(lock.owns_lock());
+ }));
+ }
+
+ ready = true;
+ for (auto& t : threads)
+ t.join();
+ }
-void f()
-{
- std::shared_lock<std::shared_timed_mutex> lk(m, std::defer_lock);
- time_point t0 = Clock::now();
- lk.lock();
- time_point t1 = Clock::now();
- assert(lk.owns_lock() == true);
- ns d = t1 - t0 - WaitTime;
- assert(d < Tolerance); // within tolerance
+ // Try locking the same shared_lock again in the same thread. This should throw an exception.
+ {
+ Mutex mutex;
+ std::shared_lock<Mutex> lock(mutex, std::defer_lock);
+ lock.lock();
+ assert(lock.owns_lock());
#ifndef TEST_HAS_NO_EXCEPTIONS
- try
- {
- lk.lock();
- assert(false);
- }
- catch (std::system_error& e)
- {
- assert(e.code().value() == EDEADLK);
+ try {
+ lock.lock();
+ assert(false);
+ } catch (std::system_error const& e) {
+ assert(e.code() == std::errc::resource_deadlock_would_occur);
}
#endif
- lk.unlock();
- lk.release();
+ }
+
+ // Try locking a shared_lock that isn't associated to any mutex. This should throw an exception.
+ {
+ std::shared_lock<Mutex> lock; // no associated mutex
#ifndef TEST_HAS_NO_EXCEPTIONS
- try
- {
- lk.lock();
- assert(false);
- }
- catch (std::system_error& e)
- {
- assert(e.code().value() == EPERM);
+ try {
+ lock.lock();
+ assert(false);
+ } catch (std::system_error const& e) {
+ assert(e.code() == std::errc::operation_not_permitted);
}
#endif
+ }
}
-int main(int, char**)
-{
- m.lock();
- std::vector<std::thread> v;
- for (int i = 0; i < 5; ++i)
- v.push_back(support::make_test_thread(f));
- std::this_thread::sleep_for(WaitTime);
- m.unlock();
- for (auto& t : v)
- t.join();
+int main(int, char**) {
+#if TEST_STD_VER >= 17
+ test<std::shared_mutex>();
+#endif
+ test<std::shared_timed_mutex>();
+ test<TrackedMutex>();
+
+ // Use shared_lock with a dummy mutex class that tracks whether each
+ // operation has been called or not.
+ {
+ Monitor monitor;
+ TrackedMutex mutex{&monitor};
+
+ std::shared_lock<TrackedMutex> lock(mutex, std::defer_lock);
+ lock.lock();
+ assert(monitor.lock_shared_called);
+ assert(lock.owns_lock());
+
+ lock.unlock();
+ assert(monitor.unlock_shared_called);
+ }
return 0;
}
diff --git a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/try_lock.pass.cpp b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/try_lock.pass.cpp
index 0e707fcf2d50..b6146680b6e3 100644
--- a/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/try_lock.pass.cpp
+++ b/libcxx/test/std/thread/thread.mutex/thread.lock/thread.lock.shared/thread.lock.shared.locking/try_lock.pass.cpp
@@ -5,11 +5,9 @@
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
-//
+
// UNSUPPORTED: no-threads
// UNSUPPORTED: c++03, c++11
-//
-// ALLOW_RETRIES: 2
// <shared_mutex>
@@ -17,60 +15,115 @@
// bool try_lock();
+#include <atomic>
#include <cassert>
-#include <mutex>
+#include <mutex> // std::defer_lock
#include <shared_mutex>
#include <system_error>
+#include <thread>
+#include <vector>
+#include "make_test_thread.h"
#include "test_macros.h"
-bool try_lock_called = false;
+struct Monitor {
+ bool try_lock_shared_called = false;
+ bool unlock_shared_called = false;
+};
-struct mutex
-{
- bool try_lock_shared()
- {
- try_lock_called = !try_lock_called;
- return try_lock_called;
- }
- void unlock_shared() {}
+struct TrackedMutex {
+ Monitor* monitor = nullptr;
+
+ bool try_lock_shared() {
+ if (monitor != nullptr)
+ monitor->try_lock_shared_called = true;
+ return true;
+ }
+ void unlock_shared() {
+ if (monitor != nullptr)
+ monitor->unlock_shared_called = true;
+ }
};
-mutex m;
+template <class Mutex>
+void test() {
+ // Basic sanity test
+ {
+ Mutex mutex;
+ std::vector<std::thread> threads;
+ std::atomic<bool> ready(false);
+ for (int i = 0; i != 5; ++i) {
+ threads.push_back(support::make_test_thread([&] {
+ while (!ready) {
+ // spin
+ }
-int main(int, char**)
-{
- std::shared_lock<mutex> lk(m, std::defer_lock);
- assert(lk.try_lock() == true);
- assert(try_lock_called == true);
- assert(lk.owns_lock() == true);
-#ifndef TEST_HAS_NO_EXCEPTIONS
- try
- {
- TEST_IGNORE_NODISCARD lk.try_lock();
- assert(false);
+ std::shared_lock<Mutex> lock(mutex, std::defer_lock);
+ bool result = lock.try_lock();
+ assert(result);
+ assert(lock.owns_lock());
+ }));
}
- catch (std::system_error& e)
- {
- assert(e.code().value() == EDEADLK);
+
+ ready = true;
+ for (auto& t : threads)
+ t.join();
+ }
+
+ // Make sure that we throw an exception if we try to re-lock a mutex that is
+ // already locked by the current thread.
+ {
+ Mutex mutex;
+
+ std::shared_lock<Mutex> lock(mutex, std::defer_lock);
+ assert(lock.try_lock());
+ assert(lock.owns_lock());
+#ifndef TEST_HAS_NO_EXCEPTIONS
+ try {
+ TEST_IGNORE_NODISCARD lock.try_lock();
+ assert(false);
+ } catch (std::system_error const& e) {
+ assert(e.code() == std::errc::resource_deadlock_would_occur);
}
#endif
- lk.unlock();
- assert(lk.try_lock() == false);
- assert(try_lock_called == false);
- assert(lk.owns_lock() == false);
- lk.release();
+ }
+
+ // Make sure that we throw an exception if we try to lock a shared_lock
+ // that is not associated to any mutex.
+ {
+ std::shared_lock<Mutex> lock; // not associated to a mutex
#ifndef TEST_HAS_NO_EXCEPTIONS
- try
- {
- TEST_IGNORE_NODISCARD lk.try_lock();
- assert(false);
- }
- catch (std::system_error& e)
- {
- assert(e.code().value() == EPERM);
+ try {
+ TEST_IGNORE_NODISCARD lock.try_lock();
+ assert(false);
+ } catch (std::system_error const& e) {
+ assert(e.code() == std::errc::operation_not_permitted);
}
#endif
+ }
+}
+
+int main(int, char**) {
+#if TEST_STD_VER >= 17
+ test<std::shared_mutex>();
+#endif
+ test<std::shared_timed_mutex>();
+ test<TrackedMutex>();
+
+ // Use shared_lock with a dummy mutex class that tracks whether each
+ // operation has been called or not.
+ {
+ Monitor monitor;
+ TrackedMutex mutex{&monitor};
+
+ std::shared_lock<TrackedMutex> lock(mutex, std::defer_lock);
+ bool result = lock.try_lock();
+ assert(result);
+ assert(monitor.try_lock_shared_called);
+ assert(lock.owns_lock());
+ lock.unlock();
+ assert(monitor.unlock_shared_called);
+ }
return 0;
}
diff --git a/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/has_unique_object_representations.pass.cpp b/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/has_unique_object_representations.pass.cpp
index ce34c8e958db..b8b84bb90882 100644
--- a/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/has_unique_object_representations.pass.cpp
+++ b/libcxx/test/std/utilities/meta/meta.unary/meta.unary.prop/has_unique_object_representations.pass.cpp
@@ -99,6 +99,8 @@ int main(int, char**)
test_has_unique_object_representations<unsigned>();
test_has_unique_object_representations<NonEmptyUnion>();
test_has_unique_object_representations<char[3]>();
+ test_has_unique_object_representations<char[3][4]>();
+ test_has_unique_object_representations<char[3][4][5]>();
test_has_unique_object_representations<char[]>();
diff --git a/libcxx/utils/libcxx/test/features.py b/libcxx/utils/libcxx/test/features.py
index c81b56b1af54..093cd39ea64c 100644
--- a/libcxx/utils/libcxx/test/features.py
+++ b/libcxx/utils/libcxx/test/features.py
@@ -38,6 +38,39 @@ def _getAndroidDeviceApi(cfg):
)
)
+
+def _mingwSupportsModules(cfg):
+ # At the moment, only the mingw headers are known to work with libc++
+ # built as a module.
+ if "__MINGW32__" not in compilerMacros(cfg):
+ return False
+ # For mingw headers, check for a version known to support being built
+ # as a module.
+ return sourceBuilds(
+ cfg,
+ """
+ #include <_mingw_mac.h>
+ #if __MINGW64_VERSION_MAJOR < 12
+ #error Headers known to be incompatible
+ #elif __MINGW64_VERSION_MAJOR == 12
+ // The headers were fixed to work with libc++ modules during
+ // __MINGW64_VERSION_MAJOR == 12. The headers became compatible
+ // with libc++ built as a module in
+ // 1652e9241b5d8a5a779c6582b1c3c4f4a7cc66e5 (Apr 2024), but the
+ // following commit 8c13b28ace68f2c0094d45121d59a4b951b533ed
+ // removed the now unused __mingw_static_ovr define. Use this
+ // as an indicator of whether the headers are new enough.
+ #ifdef __mingw_static_ovr
+ #error Headers too old
+ #endif
+ #else
+ // __MINGW64_VERSION_MAJOR > 12 should be ok.
+ #endif
+ int main() { return 0; }
+ """,
+ )
+
+
# Lit features are evaluated in order. Some checks may require the compiler detection to have
# run first in order to work properly.
DEFAULT_FEATURES = [
@@ -281,7 +314,7 @@ DEFAULT_FEATURES = [
# Any declaration of a library function shall have external linkage.
when=lambda cfg: "__ANDROID__" in compilerMacros(cfg)
or "__FreeBSD__" in compilerMacros(cfg)
- or "_WIN32" in compilerMacros(cfg)
+ or ("_WIN32" in compilerMacros(cfg) and not _mingwSupportsModules(cfg))
or platform.system().lower().startswith("aix")
# Avoid building on platforms that don't support modules properly.
or not hasCompileFlag(cfg, "-Wno-reserved-module-identifier"),
diff --git a/libunwind/include/__libunwind_config.h b/libunwind/include/__libunwind_config.h
index 8db336b2d727..028b9e3baa80 100644
--- a/libunwind/include/__libunwind_config.h
+++ b/libunwind/include/__libunwind_config.h
@@ -180,6 +180,10 @@
#endif
#define _LIBUNWIND_HIGHEST_DWARF_REGISTER \
_LIBUNWIND_HIGHEST_DWARF_REGISTER_LOONGARCH
+#elif defined(__wasm__)
+// Unused
+#define _LIBUNWIND_CONTEXT_SIZE 0
+#define _LIBUNWIND_CURSOR_SIZE 0
# else
# error "Unsupported architecture."
# endif
diff --git a/libunwind/src/UnwindLevel1.c b/libunwind/src/UnwindLevel1.c
index 05d0f2cb0a0a..48e7bc3b9e00 100644
--- a/libunwind/src/UnwindLevel1.c
+++ b/libunwind/src/UnwindLevel1.c
@@ -31,7 +31,8 @@
#include "libunwind_ext.h"
#include "unwind.h"
-#if !defined(_LIBUNWIND_ARM_EHABI) && !defined(__USING_SJLJ_EXCEPTIONS__)
+#if !defined(_LIBUNWIND_ARM_EHABI) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
+ !defined(__wasm__)
#ifndef _LIBUNWIND_SUPPORT_SEH_UNWIND
diff --git a/libunwind/src/UnwindRegistersRestore.S b/libunwind/src/UnwindRegistersRestore.S
index 42c2488fc7cf..67d9e0571189 100644
--- a/libunwind/src/UnwindRegistersRestore.S
+++ b/libunwind/src/UnwindRegistersRestore.S
@@ -20,7 +20,7 @@
.text
#endif
-#if !defined(__USING_SJLJ_EXCEPTIONS__)
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)
#if defined(__i386__)
DEFINE_LIBUNWIND_FUNCTION(__libunwind_Registers_x86_jumpto)
@@ -1232,7 +1232,7 @@ DEFINE_LIBUNWIND_FUNCTION(_ZN9libunwind19Registers_loongarch6jumptoEv)
#endif
-#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */
+#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */
NO_EXEC_STACK_DIRECTIVE
diff --git a/libunwind/src/UnwindRegistersSave.S b/libunwind/src/UnwindRegistersSave.S
index 19a0e87d683c..5bf6055fe414 100644
--- a/libunwind/src/UnwindRegistersSave.S
+++ b/libunwind/src/UnwindRegistersSave.S
@@ -20,7 +20,7 @@
.text
#endif
-#if !defined(__USING_SJLJ_EXCEPTIONS__)
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)
#if defined(__i386__)
@@ -1177,6 +1177,6 @@ DEFINE_LIBUNWIND_FUNCTION(__unw_getcontext)
WEAK_ALIAS(__unw_getcontext, unw_getcontext)
-#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) */
+#endif /* !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__) */
NO_EXEC_STACK_DIRECTIVE
diff --git a/libunwind/src/libunwind.cpp b/libunwind/src/libunwind.cpp
index 217dde909863..7e5c6bd263e1 100644
--- a/libunwind/src/libunwind.cpp
+++ b/libunwind/src/libunwind.cpp
@@ -26,7 +26,7 @@
#include <sanitizer/asan_interface.h>
#endif
-#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__USING_WASM_EXCEPTIONS__)
+#if !defined(__USING_SJLJ_EXCEPTIONS__) && !defined(__wasm__)
#include "AddressSpace.hpp"
#include "UnwindCursor.hpp"
@@ -348,7 +348,7 @@ void __unw_remove_dynamic_eh_frame_section(unw_word_t eh_frame_start) {
#endif // defined(_LIBUNWIND_SUPPORT_DWARF_UNWIND)
#endif // !defined(__USING_SJLJ_EXCEPTIONS__) &&
- // !defined(__USING_WASM_EXCEPTIONS__)
+ // !defined(__wasm__)
#ifdef __APPLE__
diff --git a/lld/COFF/DriverUtils.cpp b/lld/COFF/DriverUtils.cpp
index b4ff31a606da..6e8f74c83be4 100644
--- a/lld/COFF/DriverUtils.cpp
+++ b/lld/COFF/DriverUtils.cpp
@@ -21,6 +21,7 @@
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/BinaryFormat/COFF.h"
+#include "llvm/IR/Mangler.h"
#include "llvm/Object/COFF.h"
#include "llvm/Object/WindowsResource.h"
#include "llvm/Option/Arg.h"
@@ -39,6 +40,7 @@
#include <optional>
using namespace llvm::COFF;
+using namespace llvm::object;
using namespace llvm::opt;
using namespace llvm;
using llvm::sys::Process;
@@ -632,18 +634,6 @@ err:
fatal("invalid /export: " + arg);
}
-static StringRef undecorate(COFFLinkerContext &ctx, StringRef sym) {
- if (ctx.config.machine != I386)
- return sym;
- // In MSVC mode, a fully decorated stdcall function is exported
- // as-is with the leading underscore (with type IMPORT_NAME).
- // In MinGW mode, a decorated stdcall function gets the underscore
- // removed, just like normal cdecl functions.
- if (sym.starts_with("_") && sym.contains('@') && !ctx.config.mingw)
- return sym;
- return sym.starts_with("_") ? sym.substr(1) : sym;
-}
-
// Convert stdcall/fastcall style symbols into unsuffixed symbols,
// with or without a leading underscore. (MinGW specific.)
static StringRef killAt(StringRef sym, bool prefix) {
@@ -693,11 +683,29 @@ void LinkerDriver::fixupExports() {
for (Export &e : ctx.config.exports) {
if (!e.exportAs.empty()) {
e.exportName = e.exportAs;
- } else if (!e.forwardTo.empty()) {
- e.exportName = undecorate(ctx, e.name);
- } else {
- e.exportName = undecorate(ctx, e.extName.empty() ? e.name : e.extName);
+ continue;
+ }
+
+ StringRef sym =
+ !e.forwardTo.empty() || e.extName.empty() ? e.name : e.extName;
+ if (ctx.config.machine == I386 && sym.starts_with("_")) {
+ // In MSVC mode, a fully decorated stdcall function is exported
+ // as-is with the leading underscore (with type IMPORT_NAME).
+ // In MinGW mode, a decorated stdcall function gets the underscore
+ // removed, just like normal cdecl functions.
+ if (ctx.config.mingw || !sym.contains('@')) {
+ e.exportName = sym.substr(1);
+ continue;
+ }
+ }
+ if (isArm64EC(ctx.config.machine) && !e.data && !e.constant) {
+ if (std::optional<std::string> demangledName =
+ getArm64ECDemangledFunctionName(sym)) {
+ e.exportName = saver().save(*demangledName);
+ continue;
+ }
}
+ e.exportName = sym;
}
if (ctx.config.killAt && ctx.config.machine == I386) {
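The new ARM64EC branch above defers to llvm::object::getArm64ECDemangledFunctionName when choosing export names. As a rough, illustrative stand-in (not the real LLVM implementation), the behavior the accompanying arm64ec-exports.s test expects — strip a leading '#', and drop the '$$h' marker from C++ mangled names — could be sketched as:

#include <optional>
#include <string>

// Illustrative approximation only; the real logic is
// llvm::object::getArm64ECDemangledFunctionName.
std::optional<std::string> demangleArm64ECName(std::string name) {
  if (!name.empty() && name[0] == '#')
    return name.substr(1);                        // "#mangled_func" -> "mangled_func"
  std::size_t pos = name.find("$$h");
  if (!name.empty() && name[0] == '?' && pos != std::string::npos)
    return name.erase(pos, 3);                    // "?cxx_func@@$$hYAHXZ" -> "?cxx_func@@YAHXZ"
  return std::nullopt;                            // not an ARM64EC-mangled function name
}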
diff --git a/lld/ELF/Config.h b/lld/ELF/Config.h
index dbb81412453a..f0dfe7f377de 100644
--- a/lld/ELF/Config.h
+++ b/lld/ELF/Config.h
@@ -102,6 +102,9 @@ enum class GnuStackKind { None, Exec, NoExec };
// For --lto=
enum LtoKind : uint8_t {UnifiedThin, UnifiedRegular, Default};
+// For -z gcs=
+enum class GcsPolicy { Implicit, Never, Always };
+
struct SymbolVersion {
llvm::StringRef name;
bool isExternCpp;
@@ -188,6 +191,7 @@ struct Config {
StringRef zBtiReport = "none";
StringRef zCetReport = "none";
StringRef zPauthReport = "none";
+ StringRef zGcsReport = "none";
bool ltoBBAddrMap;
llvm::StringRef ltoBasicBlockSections;
std::pair<llvm::StringRef, llvm::StringRef> thinLTOObjectSuffixReplace;
@@ -341,6 +345,7 @@ struct Config {
UnresolvedPolicy unresolvedSymbols;
UnresolvedPolicy unresolvedSymbolsInShlib;
Target2Policy target2;
+ GcsPolicy zGcs;
bool power10Stubs;
ARMVFPArgKind armVFPArgs = ARMVFPArgKind::Default;
BuildIdKind buildId = BuildIdKind::None;
diff --git a/lld/ELF/Driver.cpp b/lld/ELF/Driver.cpp
index 028cdcc83d2f..ddc574a11314 100644
--- a/lld/ELF/Driver.cpp
+++ b/lld/ELF/Driver.cpp
@@ -466,6 +466,10 @@ static void checkOptions() {
error("-z bti-report only supported on AArch64");
if (config->zPauthReport != "none")
error("-z pauth-report only supported on AArch64");
+ if (config->zGcsReport != "none")
+ error("-z gcs-report only supported on AArch64");
+ if (config->zGcs != GcsPolicy::Implicit)
+ error("-z gcs only supported on AArch64");
}
if (config->emachine != EM_386 && config->emachine != EM_X86_64 &&
@@ -560,6 +564,25 @@ static uint8_t getZStartStopVisibility(opt::InputArgList &args) {
return ret;
}
+static GcsPolicy getZGcs(opt::InputArgList &args) {
+ GcsPolicy ret = GcsPolicy::Implicit;
+ for (auto *arg : args.filtered(OPT_z)) {
+ std::pair<StringRef, StringRef> kv = StringRef(arg->getValue()).split('=');
+ if (kv.first == "gcs") {
+ arg->claim();
+ if (kv.second == "implicit")
+ ret = GcsPolicy::Implicit;
+ else if (kv.second == "never")
+ ret = GcsPolicy::Never;
+ else if (kv.second == "always")
+ ret = GcsPolicy::Always;
+ else
+ error("unknown -z gcs= value: " + kv.second);
+ }
+ }
+ return ret;
+}
+
// Report a warning for an unknown -z option.
static void checkZOptions(opt::InputArgList &args) {
// This function is called before getTarget(), when certain options are not
@@ -1438,6 +1461,7 @@ static void readConfigs(opt::InputArgList &args) {
config->zCopyreloc = getZFlag(args, "copyreloc", "nocopyreloc", true);
config->zForceBti = hasZOption(args, "force-bti");
config->zForceIbt = hasZOption(args, "force-ibt");
+ config->zGcs = getZGcs(args);
config->zGlobal = hasZOption(args, "global");
config->zGnustack = getZGnuStack(args);
config->zHazardplt = hasZOption(args, "hazardplt");
@@ -1510,6 +1534,7 @@ static void readConfigs(opt::InputArgList &args) {
auto reports = {std::make_pair("bti-report", &config->zBtiReport),
std::make_pair("cet-report", &config->zCetReport),
+ std::make_pair("gcs-report", &config->zGcsReport),
std::make_pair("pauth-report", &config->zPauthReport)};
for (opt::Arg *arg : args.filtered(OPT_z)) {
std::pair<StringRef, StringRef> option =
@@ -2678,6 +2703,11 @@ static void readSecurityNotes() {
"GNU_PROPERTY_AARCH64_FEATURE_1_BTI property");
checkAndReportMissingFeature(
+ config->zGcsReport, features, GNU_PROPERTY_AARCH64_FEATURE_1_GCS,
+ toString(f) + ": -z gcs-report: file does not have "
+ "GNU_PROPERTY_AARCH64_FEATURE_1_GCS property");
+
+ checkAndReportMissingFeature(
config->zCetReport, features, GNU_PROPERTY_X86_FEATURE_1_IBT,
toString(f) + ": -z cet-report: file does not have "
"GNU_PROPERTY_X86_FEATURE_1_IBT property");
@@ -2729,6 +2759,12 @@ static void readSecurityNotes() {
// Force enable Shadow Stack.
if (config->zShstk)
config->andFeatures |= GNU_PROPERTY_X86_FEATURE_1_SHSTK;
+
+ // Force enable/disable GCS
+ if (config->zGcs == GcsPolicy::Always)
+ config->andFeatures |= GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
+ else if (config->zGcs == GcsPolicy::Never)
+ config->andFeatures &= ~GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
}
static void initSectionsAndLocalSyms(ELFFileBase *file, bool ignoreComdats) {
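Putting the -z gcs pieces above together: readConfigs() records the policy, and readSecurityNotes() then either keeps the AND of the per-object GCS bits (Implicit), forces the bit on (Always), or clears it (Never). A condensed sketch of that final step, with the property constant simplified to the value used in the test inputs below:

#include <cstdint>

enum class GcsPolicy { Implicit, Never, Always };
constexpr uint32_t GNU_PROPERTY_AARCH64_FEATURE_1_GCS = 0x4; // value used in the test inputs

// Sketch of the final feature resolution: with Implicit the bit survives only
// if every input object advertised GCS; Always/Never override that result.
uint32_t resolveGcsFeature(uint32_t andFeatures, GcsPolicy zGcs) {
  if (zGcs == GcsPolicy::Always)
    andFeatures |= GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
  else if (zGcs == GcsPolicy::Never)
    andFeatures &= ~GNU_PROPERTY_AARCH64_FEATURE_1_GCS;
  return andFeatures;
}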
diff --git a/lld/test/COFF/arm64ec-exports.s b/lld/test/COFF/arm64ec-exports.s
new file mode 100644
index 000000000000..a48211e6fb76
--- /dev/null
+++ b/lld/test/COFF/arm64ec-exports.s
@@ -0,0 +1,121 @@
+; REQUIRES: aarch64
+; RUN: split-file %s %t.dir && cd %t.dir
+
+; RUN: llvm-mc -filetype=obj -triple=arm64ec-windows test.s -o test.obj
+; RUN: llvm-mc -filetype=obj -triple=arm64ec-windows drectve.s -o drectve.obj
+; RUN: llvm-mc -filetype=obj -triple=arm64ec-windows %S/Inputs/loadconfig-arm64ec.s -o loadconfig-arm64ec.obj
+
+; Check various forms of export directive and make sure that function export name is demangled.
+
+; RUN: lld-link -out:out.dll test.obj loadconfig-arm64ec.obj -dll -noentry -machine:arm64ec \
+; RUN: -export:unmangled_func '-export:#mangled_func' '-export:#exportas_func,EXPORTAS,exportas_func' \
+; RUN: '-export:?cxx_func@@$$hYAHXZ' -export:data_sym,DATA '-export:#mangled_data_sym,DATA'
+
+
+; RUN: llvm-readobj --coff-exports out.dll | FileCheck --check-prefix=EXP %s
+; EXP: Export {
+; EXP-NEXT: Ordinal: 1
+; EXP-NEXT: Name: #mangled_data_sym
+; EXP-NEXT: RVA: 0x3000
+; EXP-NEXT: }
+; EXP-NEXT: Export {
+; EXP-NEXT: Ordinal: 2
+; EXP-NEXT: Name: ?cxx_func@@YAHXZ
+; EXP-NEXT: RVA: 0x1018
+; EXP-NEXT: }
+; EXP-NEXT: Export {
+; EXP-NEXT: Ordinal: 3
+; EXP-NEXT: Name: data_sym
+; EXP-NEXT: RVA: 0x3004
+; EXP-NEXT: }
+; EXP-NEXT: Export {
+; EXP-NEXT: Ordinal: 4
+; EXP-NEXT: Name: exportas_func
+; EXP-NEXT: RVA: 0x1010
+; EXP-NEXT: }
+; EXP-NEXT: Export {
+; EXP-NEXT: Ordinal: 5
+; EXP-NEXT: Name: mangled_func
+; EXP-NEXT: RVA: 0x1008
+; EXP-NEXT: }
+; EXP-NEXT: Export {
+; EXP-NEXT: Ordinal: 6
+; EXP-NEXT: Name: unmangled_func
+; EXP-NEXT: RVA: 0x1000
+; EXP-NEXT: }
+
+; RUN: llvm-nm --print-armap out.lib | FileCheck --check-prefix=IMPLIB %s
+; IMPLIB: Archive EC map
+; IMPLIB-NEXT: #exportas_func in out
+; IMPLIB-NEXT: #mangled_func in out
+; IMPLIB-NEXT: #unmangled_func in out
+; IMPLIB-NEXT: ?cxx_func@@$$hYAHXZ in out
+; IMPLIB-NEXT: ?cxx_func@@YAHXZ in out
+; IMPLIB-NEXT: __IMPORT_DESCRIPTOR_out{{.*}} in out
+; IMPLIB-NEXT: __NULL_IMPORT_DESCRIPTOR in out
+; IMPLIB-NEXT: __imp_?cxx_func@@YAHXZ in out
+; IMPLIB-NEXT: __imp_aux_?cxx_func@@YAHXZ in out
+; IMPLIB-NEXT: __imp_aux_exportas_func in out
+; IMPLIB-NEXT: __imp_aux_mangled_func in out
+; IMPLIB-NEXT: __imp_aux_unmangled_func in out
+; IMPLIB-NEXT: __imp_data_sym in out
+; IMPLIB-NEXT: __imp_exportas_func in out
+; IMPLIB-NEXT: __imp_mangled_data_sym in out
+; IMPLIB-NEXT: __imp_mangled_func in out
+; IMPLIB-NEXT: __imp_unmangled_func in out
+; IMPLIB-NEXT: exportas_func in out
+; IMPLIB-NEXT: mangled_func in out
+; IMPLIB-NEXT: unmangled_func in out
+; IMPLIB-NEXT: out{{.*}}_NULL_THUNK_DATA in out
+
+
+; Check that using .drectve section has the same effect.
+
+; RUN: lld-link -out:out2.dll test.obj loadconfig-arm64ec.obj -dll -noentry -machine:arm64ec drectve.obj
+; RUN: llvm-readobj --coff-exports out2.dll | FileCheck --check-prefix=EXP %s
+; RUN: llvm-nm --print-armap out2.lib | FileCheck --check-prefix=IMPLIB %s
+
+#--- test.s
+ .text
+ .globl unmangled_func
+ .p2align 2, 0x0
+unmangled_func:
+ mov w0, #1
+ ret
+
+ .globl "#mangled_func"
+ .p2align 2, 0x0
+"#mangled_func":
+ mov w0, #2
+ ret
+
+ .globl "#exportas_func"
+ .p2align 2, 0x0
+"#exportas_func":
+ mov w0, #3
+ ret
+
+ .globl "?cxx_func@@$$hYAHXZ"
+ .p2align 2, 0x0
+"?cxx_func@@$$hYAHXZ":
+ mov w0, #4
+ ret
+
+ .data
+ .globl "#mangled_data_sym"
+ .p2align 2, 0x0
+"#mangled_data_sym":
+ .word 0x01010101
+ .globl data_sym
+ .p2align 2, 0x0
+data_sym:
+ .word 0x01010101
+
+#--- drectve.s
+ .section .drectve, "yn"
+ .ascii " -export:unmangled_func"
+ .ascii " -export:#mangled_func"
+ .ascii " -export:#exportas_func,EXPORTAS,exportas_func"
+ .ascii " -export:?cxx_func@@$$hYAHXZ"
+ .ascii " -export:data_sym,DATA"
+ .ascii " -export:#mangled_data_sym,DATA"
diff --git a/lld/test/ELF/aarch64-feature-gcs.s b/lld/test/ELF/aarch64-feature-gcs.s
new file mode 100644
index 000000000000..7a08673dbb7e
--- /dev/null
+++ b/lld/test/ELF/aarch64-feature-gcs.s
@@ -0,0 +1,134 @@
+# REQUIRES: aarch64
+# RUN: rm -rf %t && split-file %s %t && cd %t
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-gnu func1-gcs.s -o func1-gcs.o
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-gnu func2.s -o func2.o
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-gnu func2-gcs.s -o func2-gcs.o
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-gnu func3.s -o func3.o
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-gnu func3-gcs.s -o func3-gcs.o
+
+## GCS should be enabled when it's enabled in all inputs or when it's forced on.
+
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -o gcs
+# RUN: llvm-readelf -n gcs | FileCheck --check-prefix GCS %s
+# RUN: ld.lld func1-gcs.o func3-gcs.o --shared -o gcs.so
+# RUN: llvm-readelf -n gcs.so | FileCheck --check-prefix GCS %s
+# RUN: ld.lld func1-gcs.o func2.o func3-gcs.o -o force-gcs -z gcs=always
+# RUN: llvm-readelf -n force-gcs | FileCheck --check-prefix GCS %s
+# RUN: ld.lld func2-gcs.o func3.o --shared -o force-gcs.so -z gcs=always
+# RUN: llvm-readelf -n force-gcs.so | FileCheck --check-prefix GCS %s
+# RUN: ld.lld func2-gcs.o func3.o --shared -o force-gcs2.so -z gcs=never -z gcs=always
+# RUN: llvm-readelf -n force-gcs2.so | FileCheck --check-prefix GCS %s
+
+# GCS: Properties: aarch64 feature: GCS
+
+## GCS should not be enabled if it's not enabled in at least one input.
+
+# RUN: ld.lld func1-gcs.o func2.o func3-gcs.o -o no-gcs
+# RUN: llvm-readelf -n no-gcs | count 0
+# RUN: ld.lld func2-gcs.o func3.o --shared -o no-gcs.so
+
+## GCS should be disabled with gcs=never, even if GCS is present in all inputs.
+
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -z gcs=never -o never-gcs
+# RUN: llvm-readelf -n never-gcs | count 0
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -z gcs=always -z gcs=never -o never-gcs2
+# RUN: llvm-readelf -n never-gcs2 | count 0
+
+## gcs-report should report any input files that don't have the gcs property.
+
+# RUN: ld.lld func1-gcs.o func2.o func3-gcs.o -o /dev/null -z gcs-report=warning 2>&1 | FileCheck --check-prefix=REPORT-WARN %s
+# RUN: ld.lld func1-gcs.o func2.o func3-gcs.o -o /dev/null -z gcs-report=warning -z gcs=always 2>&1 | FileCheck --check-prefix=REPORT-WARN %s
+# RUN: ld.lld func1-gcs.o func2.o func3-gcs.o -o /dev/null -z gcs-report=warning -z gcs=never 2>&1 | FileCheck --check-prefix=REPORT-WARN %s
+# RUN: not ld.lld func2-gcs.o func3.o --shared -o /dev/null -z gcs-report=error 2>&1 | FileCheck --check-prefix=REPORT-ERROR %s
+# RUN: not ld.lld func2-gcs.o func3.o --shared -o /dev/null -z gcs-report=error -z gcs=always 2>&1 | FileCheck --check-prefix=REPORT-ERROR %s
+# RUN: not ld.lld func2-gcs.o func3.o --shared -o /dev/null -z gcs-report=error -z gcs=never 2>&1 | FileCheck --check-prefix=REPORT-ERROR %s
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -o /dev/null -z gcs-report=warning 2>&1 | count 0
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -o /dev/null -z gcs-report=warning -z gcs=always 2>&1 | count 0
+# RUN: ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -o /dev/null -z gcs-report=warning -z gcs=never 2>&1 | count 0
+
+# REPORT-WARN: warning: func2.o: -z gcs-report: file does not have GNU_PROPERTY_AARCH64_FEATURE_1_GCS property
+# REPORT-ERROR: error: func3.o: -z gcs-report: file does not have GNU_PROPERTY_AARCH64_FEATURE_1_GCS property
+
+## An invalid gcs option should give an error.
+# RUN: not ld.lld func1-gcs.o func2-gcs.o func3-gcs.o -z gcs=nonsense 2>&1 | FileCheck --check-prefix=INVALID %s
+
+# INVALID: error: unknown -z gcs= value: nonsense
+
+#--- func1-gcs.s
+.section ".note.gnu.property", "a"
+.long 4
+.long 0x10
+.long 0x5
+.asciz "GNU"
+
+.long 0xc0000000 // GNU_PROPERTY_AARCH64_FEATURE_1_AND
+.long 4
+.long 4 // GNU_PROPERTY_AARCH64_FEATURE_1_GCS
+.long 0
+
+.text
+.globl _start
+.type func1,%function
+func1:
+ bl func2
+ ret
+
+#--- func2.s
+
+.text
+.globl func2
+.type func2,@function
+func2:
+ .globl func3
+ .type func3, @function
+ bl func3
+ ret
+
+#--- func2-gcs.s
+
+.section ".note.gnu.property", "a"
+.long 4
+.long 0x10
+.long 0x5
+.asciz "GNU"
+
+.long 0xc0000000 // GNU_PROPERTY_AARCH64_FEATURE_1_AND
+.long 4
+.long 4 // GNU_PROPERTY_AARCH64_FEATURE_1_GCS
+.long 0
+
+.text
+.globl func2
+.type func2,@function
+func2:
+ .globl func3
+ .type func3, @function
+ bl func3
+ ret
+
+#--- func3.s
+
+.text
+.globl func3
+.type func3,@function
+func3:
+ ret
+
+#--- func3-gcs.s
+
+.section ".note.gnu.property", "a"
+.long 4
+.long 0x10
+.long 0x5
+.asciz "GNU"
+
+.long 0xc0000000 // GNU_PROPERTY_AARCH64_FEATURE_1_AND
+.long 4
+.long 4 // GNU_PROPERTY_AARCH64_FEATURE_1_GCS
+.long 0
+
+.text
+.globl func3
+.type func3,@function
+func3:
+ ret
diff --git a/lldb/include/lldb/API/SBCommandInterpreter.h b/lldb/include/lldb/API/SBCommandInterpreter.h
index ba2e049204b8..8ac36344b3a7 100644
--- a/lldb/include/lldb/API/SBCommandInterpreter.h
+++ b/lldb/include/lldb/API/SBCommandInterpreter.h
@@ -318,6 +318,14 @@ public:
SBStructuredData GetStatistics();
+ /// Returns a list of handled commands with their output and error. Each element in
+ /// the list is a dictionary with the following keys/values:
+ /// - "command" (string): The command that was executed.
+ /// - "output" (string): The output of the command. Empty ("") if no output.
+ /// - "error" (string): The error of the command. Empty ("") if no error.
+ /// - "seconds" (float): The time it took to execute the command.
+ SBStructuredData GetTranscript();
+
protected:
friend class lldb_private::CommandPluginInterfaceImplementation;
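
A minimal Python sketch of driving the new transcript API from a script; it mirrors the SB API test added further down in this patch and assumes the `interpreter.save-transcript` setting introduced below:

    import json
    import lldb

    dbg = lldb.SBDebugger.Create()
    ci = dbg.GetCommandInterpreter()
    res = lldb.SBCommandReturnObject()
    # The enabling command itself is not recorded; only later commands are.
    ci.HandleCommand("settings set interpreter.save-transcript true", res)
    ci.HandleCommand("version", res)

    # GetTranscript() returns an SBStructuredData array; serialize it to JSON
    # and load it as a Python list of dicts.
    stream = lldb.SBStream()
    ci.GetTranscript().GetAsJSON(stream)
    for entry in json.loads(stream.GetData()):
        print(entry["command"], entry["seconds"])
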
diff --git a/lldb/include/lldb/API/SBDebugger.h b/lldb/include/lldb/API/SBDebugger.h
index 7333cd57ad31..af19b1faf3bf 100644
--- a/lldb/include/lldb/API/SBDebugger.h
+++ b/lldb/include/lldb/API/SBDebugger.h
@@ -328,9 +328,22 @@ public:
void SetLoggingCallback(lldb::LogOutputCallback log_callback, void *baton);
+ /// Clear all previously added callbacks and only add the given one.
+ LLDB_DEPRECATED_FIXME("Use AddDestroyCallback and RemoveDestroyCallback",
+ "AddDestroyCallback")
void SetDestroyCallback(lldb::SBDebuggerDestroyCallback destroy_callback,
void *baton);
+ /// Add a callback for when the debugger is destroyed. Return a token, which
+ /// can be used to remove said callback. Multiple callbacks can be added by
+ /// calling this function multiple times, and will be invoked in FIFO order.
+ lldb::callback_token_t
+ AddDestroyCallback(lldb::SBDebuggerDestroyCallback destroy_callback,
+ void *baton);
+
+ /// Remove the specified callback. Return true if successful.
+ bool RemoveDestroyCallback(lldb::callback_token_t token);
+
#ifndef SWIG
LLDB_DEPRECATED_FIXME("Use DispatchInput(const void *, size_t)",
"DispatchInput(const void *, size_t)")
diff --git a/lldb/include/lldb/Core/Debugger.h b/lldb/include/lldb/Core/Debugger.h
index ea994bf8c28d..a72c2596cc2c 100644
--- a/lldb/include/lldb/Core/Debugger.h
+++ b/lldb/include/lldb/Core/Debugger.h
@@ -40,6 +40,7 @@
#include "lldb/lldb-types.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/DynamicLibrary.h"
@@ -559,10 +560,25 @@ public:
static void ReportSymbolChange(const ModuleSpec &module_spec);
+ /// DEPRECATED: We used to only support one Destroy callback. Now that we
+ /// support Add and Remove, you should only remove callbacks that you added.
+ /// Use Add and Remove instead.
+ ///
+ /// Clear all previously added callbacks and only add the given one.
void
SetDestroyCallback(lldb_private::DebuggerDestroyCallback destroy_callback,
void *baton);
+ /// Add a callback for when the debugger is destroyed. Return a token, which
+ /// can be used to remove said callback. Multiple callbacks can be added by
+ /// calling this function multiple times, and will be invoked in FIFO order.
+ lldb::callback_token_t
+ AddDestroyCallback(lldb_private::DebuggerDestroyCallback destroy_callback,
+ void *baton);
+
+ /// Remove the specified callback. Return true if successful.
+ bool RemoveDestroyCallback(lldb::callback_token_t token);
+
/// Manually start the global event handler thread. It is useful to plugins
/// that directly use the \a lldb_private namespace and want to use the
/// debugger's default event handler thread instead of defining their own.
@@ -721,8 +737,19 @@ protected:
lldb::TargetSP m_dummy_target_sp;
Diagnostics::CallbackID m_diagnostics_callback_id;
- lldb_private::DebuggerDestroyCallback m_destroy_callback = nullptr;
- void *m_destroy_callback_baton = nullptr;
+ std::mutex m_destroy_callback_mutex;
+ lldb::callback_token_t m_destroy_callback_next_token = 0;
+ struct DestroyCallbackInfo {
+ DestroyCallbackInfo() {}
+ DestroyCallbackInfo(lldb::callback_token_t token,
+ lldb_private::DebuggerDestroyCallback callback,
+ void *baton)
+ : token(token), callback(callback), baton(baton) {}
+ lldb::callback_token_t token;
+ lldb_private::DebuggerDestroyCallback callback;
+ void *baton;
+ };
+ llvm::SmallVector<DestroyCallbackInfo, 2> m_destroy_callbacks;
uint32_t m_interrupt_requested = 0; ///< Tracks interrupt requests
std::mutex m_interrupt_mutex;
diff --git a/lldb/include/lldb/Interpreter/CommandInterpreter.h b/lldb/include/lldb/Interpreter/CommandInterpreter.h
index 70a55a77465b..ccc30cf4f1a8 100644
--- a/lldb/include/lldb/Interpreter/CommandInterpreter.h
+++ b/lldb/include/lldb/Interpreter/CommandInterpreter.h
@@ -22,6 +22,7 @@
#include "lldb/Utility/Log.h"
#include "lldb/Utility/StreamString.h"
#include "lldb/Utility/StringList.h"
+#include "lldb/Utility/StructuredData.h"
#include "lldb/lldb-forward.h"
#include "lldb/lldb-private.h"
@@ -560,6 +561,9 @@ public:
bool GetPromptOnQuit() const;
void SetPromptOnQuit(bool enable);
+ bool GetSaveTranscript() const;
+ void SetSaveTranscript(bool enable);
+
bool GetSaveSessionOnQuit() const;
void SetSaveSessionOnQuit(bool enable);
@@ -647,6 +651,7 @@ public:
}
llvm::json::Value GetStatistics();
+ const StructuredData::Array &GetTranscript() const;
protected:
friend class Debugger;
@@ -765,7 +770,20 @@ private:
typedef llvm::StringMap<uint64_t> CommandUsageMap;
CommandUsageMap m_command_usages;
+ /// Turn on the setting `interpreter.save-transcript` for LLDB to populate
+ /// this stream. Otherwise this stream is empty.
StreamString m_transcript_stream;
+
+ /// Contains a list of handled commands and their details. Each element in
+ /// the list is a dictionary with the following keys/values:
+ /// - "command" (string): The command that was executed.
+ /// - "output" (string): The output of the command. Empty ("") if no output.
+ /// - "error" (string): The error of the command. Empty ("") if no error.
+ /// - "seconds" (float): The time it took to execute the command.
+ ///
+ /// Turn on the setting `interpreter.save-transcript` for LLDB to populate
+ /// this list. Otherwise this list is empty.
+ StructuredData::Array m_transcript;
};
} // namespace lldb_private
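
Serialized to JSON, a single transcript entry is expected to look roughly like the following (field values are illustrative, not taken from a real run):

    {
        "command": "breakpoint set -f main.c -l 5",
        "output": "Breakpoint 1: where = a.out`main + 29 at main.c:5:3 ...\n",
        "error": "",
        "seconds": 0.0012
    }
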
diff --git a/lldb/include/lldb/lldb-types.h b/lldb/include/lldb/lldb-types.h
index d60686e33142..d88b8232ee6b 100644
--- a/lldb/include/lldb/lldb-types.h
+++ b/lldb/include/lldb/lldb-types.h
@@ -68,6 +68,7 @@ typedef int pipe_t; // Host pipe type
#define LLDB_INVALID_PROCESS ((lldb::process_t)-1)
#define LLDB_INVALID_HOST_THREAD ((lldb::thread_t)NULL)
#define LLDB_INVALID_PIPE ((lldb::pipe_t)-1)
+#define LLDB_INVALID_CALLBACK_TOKEN ((lldb::callback_token_t) - 1)
typedef void (*LogOutputCallback)(const char *, void *baton);
typedef bool (*CommandOverrideCallback)(void *baton, const char **argv);
@@ -77,6 +78,7 @@ typedef bool (*ExpressionCancelCallback)(ExpressionEvaluationPhase phase,
typedef void *ScriptObjectPtr;
typedef uint64_t addr_t;
+typedef int32_t callback_token_t;
typedef uint64_t user_id_t;
typedef uint64_t pid_t;
typedef uint64_t tid_t;
diff --git a/lldb/source/API/SBCommandInterpreter.cpp b/lldb/source/API/SBCommandInterpreter.cpp
index 83c0951c56db..7a3547328368 100644
--- a/lldb/source/API/SBCommandInterpreter.cpp
+++ b/lldb/source/API/SBCommandInterpreter.cpp
@@ -6,6 +6,7 @@
//
//===----------------------------------------------------------------------===//
+#include "lldb/Utility/StructuredData.h"
#include "lldb/lldb-types.h"
#include "lldb/Interpreter/CommandInterpreter.h"
@@ -571,6 +572,21 @@ SBStructuredData SBCommandInterpreter::GetStatistics() {
return data;
}
+SBStructuredData SBCommandInterpreter::GetTranscript() {
+ LLDB_INSTRUMENT_VA(this);
+
+ SBStructuredData data;
+ if (IsValid())
+ // A deep copy is performed by `std::make_shared` on the
+ // `StructuredData::Array`, via its implicitly-declared copy constructor.
+ // This ensures thread-safety between the user changing the returned
+ // `SBStructuredData` and the `CommandInterpreter` changing its internal
+ // `m_transcript`.
+ data.m_impl_up->SetObjectSP(
+ std::make_shared<StructuredData::Array>(m_opaque_ptr->GetTranscript()));
+ return data;
+}
+
lldb::SBCommand SBCommandInterpreter::AddMultiwordCommand(const char *name,
const char *help) {
LLDB_INSTRUMENT_VA(this, name, help);
diff --git a/lldb/source/API/SBDebugger.cpp b/lldb/source/API/SBDebugger.cpp
index 9c662dfbf441..7ef0d6efd4aa 100644
--- a/lldb/source/API/SBDebugger.cpp
+++ b/lldb/source/API/SBDebugger.cpp
@@ -1695,6 +1695,26 @@ void SBDebugger::SetDestroyCallback(
}
}
+lldb::callback_token_t
+SBDebugger::AddDestroyCallback(lldb::SBDebuggerDestroyCallback destroy_callback,
+ void *baton) {
+ LLDB_INSTRUMENT_VA(this, destroy_callback, baton);
+
+ if (m_opaque_sp)
+ return m_opaque_sp->AddDestroyCallback(destroy_callback, baton);
+
+ return LLDB_INVALID_CALLBACK_TOKEN;
+}
+
+bool SBDebugger::RemoveDestroyCallback(lldb::callback_token_t token) {
+ LLDB_INSTRUMENT_VA(this, token);
+
+ if (m_opaque_sp)
+ return m_opaque_sp->RemoveDestroyCallback(token);
+
+ return false;
+}
+
SBTrace
SBDebugger::LoadTraceFromFile(SBError &error,
const SBFileSpec &trace_description_file) {
diff --git a/lldb/source/Commands/CommandObjectThread.cpp b/lldb/source/Commands/CommandObjectThread.cpp
index 4397ee14ea07..db96ee2cec38 100644
--- a/lldb/source/Commands/CommandObjectThread.cpp
+++ b/lldb/source/Commands/CommandObjectThread.cpp
@@ -114,8 +114,8 @@ public:
CommandObjectThreadBacktrace(CommandInterpreter &interpreter)
: CommandObjectIterateOverThreads(
interpreter, "thread backtrace",
- "Show thread call stacks. Defaults to the current thread, thread "
- "indexes can be specified as arguments.\n"
+ "Show backtraces of thread call stacks. Defaults to the current "
+ "thread, thread indexes can be specified as arguments.\n"
"Use the thread-index \"all\" to see all threads.\n"
"Use the thread-index \"unique\" to see threads grouped by unique "
"call stacks.\n"
diff --git a/lldb/source/Core/Debugger.cpp b/lldb/source/Core/Debugger.cpp
index 9951fbcd3e7c..309e01e45658 100644
--- a/lldb/source/Core/Debugger.cpp
+++ b/lldb/source/Core/Debugger.cpp
@@ -743,9 +743,22 @@ DebuggerSP Debugger::CreateInstance(lldb::LogOutputCallback log_callback,
}
void Debugger::HandleDestroyCallback() {
- if (m_destroy_callback) {
- m_destroy_callback(GetID(), m_destroy_callback_baton);
- m_destroy_callback = nullptr;
+ const lldb::user_id_t user_id = GetID();
+ // Invoke and remove all the callbacks in FIFO order. Callbacks that are
+ // added during this loop will be appended, invoked, and then removed last.
+ // Callbacks that are removed during this loop will not be invoked.
+ while (true) {
+ DestroyCallbackInfo callback_info;
+ {
+ std::lock_guard<std::mutex> guard(m_destroy_callback_mutex);
+ if (m_destroy_callbacks.empty())
+ break;
+ // Pop the first item in the list
+ callback_info = m_destroy_callbacks.front();
+ m_destroy_callbacks.erase(m_destroy_callbacks.begin());
+ }
+ // Call the destroy callback with user id and baton
+ callback_info.callback(user_id, callback_info.baton);
}
}
@@ -1427,8 +1440,30 @@ void Debugger::SetLoggingCallback(lldb::LogOutputCallback log_callback,
void Debugger::SetDestroyCallback(
lldb_private::DebuggerDestroyCallback destroy_callback, void *baton) {
- m_destroy_callback = destroy_callback;
- m_destroy_callback_baton = baton;
+ std::lock_guard<std::mutex> guard(m_destroy_callback_mutex);
+ m_destroy_callbacks.clear();
+ const lldb::callback_token_t token = m_destroy_callback_next_token++;
+ m_destroy_callbacks.emplace_back(token, destroy_callback, baton);
+}
+
+lldb::callback_token_t Debugger::AddDestroyCallback(
+ lldb_private::DebuggerDestroyCallback destroy_callback, void *baton) {
+ std::lock_guard<std::mutex> guard(m_destroy_callback_mutex);
+ const lldb::callback_token_t token = m_destroy_callback_next_token++;
+ m_destroy_callbacks.emplace_back(token, destroy_callback, baton);
+ return token;
+}
+
+bool Debugger::RemoveDestroyCallback(lldb::callback_token_t token) {
+ std::lock_guard<std::mutex> guard(m_destroy_callback_mutex);
+ for (auto it = m_destroy_callbacks.begin(); it != m_destroy_callbacks.end();
+ ++it) {
+ if (it->token == token) {
+ m_destroy_callbacks.erase(it);
+ return true;
+ }
+ }
+ return false;
}
static void PrivateReportProgress(Debugger &debugger, uint64_t progress_id,
diff --git a/lldb/source/Interpreter/CommandInterpreter.cpp b/lldb/source/Interpreter/CommandInterpreter.cpp
index 4c58ecc3c184..7f21f382adb8 100644
--- a/lldb/source/Interpreter/CommandInterpreter.cpp
+++ b/lldb/source/Interpreter/CommandInterpreter.cpp
@@ -51,6 +51,7 @@
#include "lldb/Utility/Log.h"
#include "lldb/Utility/State.h"
#include "lldb/Utility/Stream.h"
+#include "lldb/Utility/StructuredData.h"
#include "lldb/Utility/Timer.h"
#include "lldb/Host/Config.h"
@@ -161,6 +162,17 @@ void CommandInterpreter::SetPromptOnQuit(bool enable) {
SetPropertyAtIndex(idx, enable);
}
+bool CommandInterpreter::GetSaveTranscript() const {
+ const uint32_t idx = ePropertySaveTranscript;
+ return GetPropertyAtIndexAs<bool>(
+ idx, g_interpreter_properties[idx].default_uint_value != 0);
+}
+
+void CommandInterpreter::SetSaveTranscript(bool enable) {
+ const uint32_t idx = ePropertySaveTranscript;
+ SetPropertyAtIndex(idx, enable);
+}
+
bool CommandInterpreter::GetSaveSessionOnQuit() const {
const uint32_t idx = ePropertySaveSessionOnQuit;
return GetPropertyAtIndexAs<bool>(
@@ -816,11 +828,11 @@ void CommandInterpreter::LoadCommandDictionary() {
std::unique_ptr<CommandObjectRegexCommand> bt_regex_cmd_up(
new CommandObjectRegexCommand(
*this, "_regexp-bt",
- "Show the current thread's call stack. Any numeric argument "
- "displays at most that many "
- "frames. The argument 'all' displays all threads. Use 'settings"
- " set frame-format' to customize the printing of individual frames "
- "and 'settings set thread-format' to customize the thread header.",
+ "Show backtrace of the current thread's call stack. Any numeric "
+ "argument displays at most that many frames. The argument 'all' "
+ "displays all threads. Use 'settings set frame-format' to customize "
+ "the printing of individual frames and 'settings set thread-format' "
+ "to customize the thread header.",
"bt [<digit> | all]", 0, false));
if (bt_regex_cmd_up) {
// accept but don't document "bt -c <number>" -- before bt was a regex
@@ -1889,7 +1901,16 @@ bool CommandInterpreter::HandleCommand(const char *command_line,
else
add_to_history = (lazy_add_to_history == eLazyBoolYes);
- m_transcript_stream << "(lldb) " << command_line << '\n';
+ // The same `transcript_item` will be used below to add output and error of
+ // the command.
+ StructuredData::DictionarySP transcript_item;
+ if (GetSaveTranscript()) {
+ m_transcript_stream << "(lldb) " << command_line << '\n';
+
+ transcript_item = std::make_shared<StructuredData::Dictionary>();
+ transcript_item->AddStringItem("command", command_line);
+ m_transcript.AddItem(transcript_item);
+ }
bool empty_command = false;
bool comment_command = false;
@@ -1994,7 +2015,7 @@ bool CommandInterpreter::HandleCommand(const char *command_line,
// Take care of things like setting up the history command & calling the
// appropriate Execute method on the CommandObject, with the appropriate
// arguments.
-
+ StatsDuration execute_time;
if (cmd_obj != nullptr) {
bool generate_repeat_command = add_to_history;
// If we got here when empty_command was true, then this command is a
@@ -2035,14 +2056,24 @@ bool CommandInterpreter::HandleCommand(const char *command_line,
log, "HandleCommand, command line after removing command name(s): '%s'",
remainder.c_str());
+ ElapsedTime elapsed(execute_time);
cmd_obj->Execute(remainder.c_str(), result);
}
LLDB_LOGF(log, "HandleCommand, command %s",
(result.Succeeded() ? "succeeded" : "did not succeed"));
- m_transcript_stream << result.GetOutputData();
- m_transcript_stream << result.GetErrorData();
+ // To test whether or not the transcript should be saved, `transcript_item`
+ // is used instead of `GetSaveTranscript()`, because the latter changes value
+ // mid-command when the command is "settings set interpreter.save-transcript true".
+ if (transcript_item) {
+ m_transcript_stream << result.GetOutputData();
+ m_transcript_stream << result.GetErrorData();
+
+ transcript_item->AddStringItem("output", result.GetOutputData());
+ transcript_item->AddStringItem("error", result.GetErrorData());
+ transcript_item->AddFloatItem("seconds", execute_time.get().count());
+ }
return result.Succeeded();
}
@@ -3554,3 +3585,7 @@ llvm::json::Value CommandInterpreter::GetStatistics() {
stats.try_emplace(command_usage.getKey(), command_usage.getValue());
return stats;
}
+
+const StructuredData::Array &CommandInterpreter::GetTranscript() const {
+ return m_transcript;
+}
diff --git a/lldb/source/Interpreter/InterpreterProperties.td b/lldb/source/Interpreter/InterpreterProperties.td
index 2155ee61ccff..a5fccbbca091 100644
--- a/lldb/source/Interpreter/InterpreterProperties.td
+++ b/lldb/source/Interpreter/InterpreterProperties.td
@@ -9,6 +9,10 @@ let Definition = "interpreter" in {
Global,
DefaultTrue,
Desc<"If true, LLDB will prompt you before quitting if there are any live processes being debugged. If false, LLDB will quit without asking in any case.">;
+ def SaveTranscript: Property<"save-transcript", "Boolean">,
+ Global,
+ DefaultFalse,
+ Desc<"If true, commands will be saved into a transcript buffer for user access.">;
def SaveSessionOnQuit: Property<"save-session-on-quit", "Boolean">,
Global,
DefaultFalse,
diff --git a/lldb/source/Plugins/Instruction/ARM64/EmulateInstructionARM64.cpp b/lldb/source/Plugins/Instruction/ARM64/EmulateInstructionARM64.cpp
index 6ca4fb052457..62ecac3e0831 100644
--- a/lldb/source/Plugins/Instruction/ARM64/EmulateInstructionARM64.cpp
+++ b/lldb/source/Plugins/Instruction/ARM64/EmulateInstructionARM64.cpp
@@ -444,6 +444,8 @@ bool EmulateInstructionARM64::CreateFunctionEntryUnwind(
// Our previous Call Frame Address is the stack pointer
row->GetCFAValue().SetIsRegisterPlusOffset(gpr_sp_arm64, 0);
+ row->SetRegisterLocationToSame(gpr_lr_arm64, /*must_replace=*/false);
+ row->SetRegisterLocationToSame(gpr_fp_arm64, /*must_replace=*/false);
unwind_plan.AppendRow(row);
unwind_plan.SetSourceName("EmulateInstructionARM64");
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.cpp
index 44febcfac3b0..d28da728728e 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.cpp
@@ -259,9 +259,3 @@ DWARFDebugInfo::GetDIE(const DIERef &die_ref) {
return cu->GetNonSkeletonUnit().GetDIE(die_ref.die_offset());
return DWARFDIE(); // Not found
}
-
-llvm::StringRef DWARFDebugInfo::PeekDIEName(const DIERef &die_ref) {
- if (DWARFUnit *cu = GetUnit(die_ref))
- return cu->GetNonSkeletonUnit().PeekDIEName(die_ref.die_offset());
- return llvm::StringRef();
-}
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.h b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.h
index c1f0cb0203fb..456ebd908ccb 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DWARFDebugInfo.h
@@ -44,11 +44,6 @@ public:
bool ContainsTypeUnits();
DWARFDIE GetDIE(const DIERef &die_ref);
- /// Returns the AT_Name of this DIE, if it exists, without parsing the entire
- /// compile unit. An empty is string is returned upon error or if the
- /// attribute is not present.
- llvm::StringRef PeekDIEName(const DIERef &die_ref);
-
enum {
eDumpFlag_Verbose = (1 << 0), // Verbose dumping
eDumpFlag_ShowForm = (1 << 1), // Show the DW_form type
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
index 4da0d56fdcac..79400e36e04f 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.cpp
@@ -9,7 +9,7 @@
#include "Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h"
#include "Plugins/SymbolFile/DWARF/DWARFDebugInfo.h"
#include "Plugins/SymbolFile/DWARF/DWARFDeclContext.h"
-#include "Plugins/SymbolFile/DWARF/SymbolFileDWARFDwo.h"
+#include "Plugins/SymbolFile/DWARF/LogChannelDWARF.h"
#include "lldb/Core/Module.h"
#include "lldb/Utility/RegularExpression.h"
#include "lldb/Utility/Stream.h"
@@ -48,26 +48,30 @@ DebugNamesDWARFIndex::GetUnits(const DebugNames &debug_names) {
return result;
}
-std::optional<DIERef>
-DebugNamesDWARFIndex::ToDIERef(const DebugNames::Entry &entry) const {
+DWARFUnit *
+DebugNamesDWARFIndex::GetNonSkeletonUnit(const DebugNames::Entry &entry) const {
// Look for a DWARF unit offset (CU offset or local TU offset) as they are
// both offsets into the .debug_info section.
std::optional<uint64_t> unit_offset = entry.getCUOffset();
if (!unit_offset) {
unit_offset = entry.getLocalTUOffset();
if (!unit_offset)
- return std::nullopt;
+ return nullptr;
}
DWARFUnit *cu =
m_debug_info.GetUnitAtOffset(DIERef::Section::DebugInfo, *unit_offset);
- if (!cu)
- return std::nullopt;
+ return cu ? &cu->GetNonSkeletonUnit() : nullptr;
+}
- cu = &cu->GetNonSkeletonUnit();
+std::optional<DIERef>
+DebugNamesDWARFIndex::ToDIERef(const DebugNames::Entry &entry) const {
+ DWARFUnit *unit = GetNonSkeletonUnit(entry);
+ if (!unit)
+ return std::nullopt;
if (std::optional<uint64_t> die_offset = entry.getDIEUnitOffset())
- return DIERef(cu->GetSymbolFileDWARF().GetFileIndex(),
- DIERef::Section::DebugInfo, cu->GetOffset() + *die_offset);
+ return DIERef(unit->GetSymbolFileDWARF().GetFileIndex(),
+ DIERef::Section::DebugInfo, unit->GetOffset() + *die_offset);
return std::nullopt;
}
@@ -306,10 +310,10 @@ bool DebugNamesDWARFIndex::SameParentChain(
auto maybe_dieoffset = entry.getDIEUnitOffset();
if (!maybe_dieoffset)
return false;
- auto die_ref = ToDIERef(entry);
- if (!die_ref)
+ DWARFUnit *unit = GetNonSkeletonUnit(entry);
+ if (!unit)
return false;
- return name == m_debug_info.PeekDIEName(*die_ref);
+ return name == unit->PeekDIEName(unit->GetOffset() + *maybe_dieoffset);
};
// If the AT_name of any parent fails to match the expected name, we don't
diff --git a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
index b54dd1162d20..81fb8f88b805 100644
--- a/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
+++ b/lldb/source/Plugins/SymbolFile/DWARF/DebugNamesDWARFIndex.h
@@ -10,7 +10,6 @@
#define LLDB_SOURCE_PLUGINS_SYMBOLFILE_DWARF_DEBUGNAMESDWARFINDEX_H
#include "Plugins/SymbolFile/DWARF/DWARFIndex.h"
-#include "Plugins/SymbolFile/DWARF/LogChannelDWARF.h"
#include "Plugins/SymbolFile/DWARF/ManualDWARFIndex.h"
#include "Plugins/SymbolFile/DWARF/SymbolFileDWARF.h"
#include "lldb/Utility/ConstString.h"
@@ -84,6 +83,7 @@ private:
std::unique_ptr<DebugNames> m_debug_names_up;
ManualDWARFIndex m_fallback;
+ DWARFUnit *GetNonSkeletonUnit(const DebugNames::Entry &entry) const;
std::optional<DIERef> ToDIERef(const DebugNames::Entry &entry) const;
bool ProcessEntry(const DebugNames::Entry &entry,
llvm::function_ref<bool(DWARFDIE die)> callback);
diff --git a/lldb/source/Plugins/UnwindAssembly/InstEmulation/UnwindAssemblyInstEmulation.cpp b/lldb/source/Plugins/UnwindAssembly/InstEmulation/UnwindAssemblyInstEmulation.cpp
index c4a171ec7d01..49edd40544e3 100644
--- a/lldb/source/Plugins/UnwindAssembly/InstEmulation/UnwindAssemblyInstEmulation.cpp
+++ b/lldb/source/Plugins/UnwindAssembly/InstEmulation/UnwindAssemblyInstEmulation.cpp
@@ -424,8 +424,6 @@ size_t UnwindAssemblyInstEmulation::WriteMemory(
log->PutString(strm.GetString());
}
- const bool cant_replace = false;
-
switch (context.type) {
default:
case EmulateInstruction::eContextInvalid:
@@ -467,7 +465,7 @@ size_t UnwindAssemblyInstEmulation::WriteMemory(
m_pushed_regs[reg_num] = addr;
const int32_t offset = addr - m_initial_sp;
m_curr_row->SetRegisterLocationToAtCFAPlusOffset(reg_num, offset,
- cant_replace);
+ /*can_replace=*/true);
m_curr_row_modified = true;
}
}
diff --git a/lldb/source/Target/RegisterContextUnwind.cpp b/lldb/source/Target/RegisterContextUnwind.cpp
index 13e101413a47..e2d712cb72ea 100644
--- a/lldb/source/Target/RegisterContextUnwind.cpp
+++ b/lldb/source/Target/RegisterContextUnwind.cpp
@@ -1555,12 +1555,12 @@ RegisterContextUnwind::SavedLocationForRegister(
}
if (unwindplan_regloc.IsSame()) {
- if (!IsFrameZero() &&
+ if (!m_all_registers_available &&
(regnum.GetAsKind(eRegisterKindGeneric) == LLDB_REGNUM_GENERIC_PC ||
regnum.GetAsKind(eRegisterKindGeneric) == LLDB_REGNUM_GENERIC_RA)) {
UnwindLogMsg("register %s (%d) is marked as 'IsSame' - it is a pc or "
- "return address reg on a non-zero frame -- treat as if we "
- "have no information",
+ "return address reg on a frame which does not have all "
+ "registers available -- treat as if we have no information",
regnum.GetName(), regnum.GetAsKind(eRegisterKindLLDB));
return UnwindLLDB::RegisterSearchResult::eRegisterNotFound;
} else {
diff --git a/lldb/test/API/commands/session/save/TestSessionSave.py b/lldb/test/API/commands/session/save/TestSessionSave.py
index 172a76452304..98985c66010b 100644
--- a/lldb/test/API/commands/session/save/TestSessionSave.py
+++ b/lldb/test/API/commands/session/save/TestSessionSave.py
@@ -25,6 +25,12 @@ class SessionSaveTestCase(TestBase):
raw = ""
interpreter = self.dbg.GetCommandInterpreter()
+ # Make sure "save-transcript" is on, so that all the following setings
+ # and commands are saved into the trasncript. Note that this cannot be
+ # a part of the `settings`, because this command itself won't be saved
+ # into the transcript.
+ self.runCmd("settings set interpreter.save-transcript true")
+
settings = [
"settings set interpreter.echo-commands true",
"settings set interpreter.echo-comment-commands true",
@@ -95,6 +101,12 @@ class SessionSaveTestCase(TestBase):
raw = ""
interpreter = self.dbg.GetCommandInterpreter()
+ # Make sure "save-transcript" is on, so that all the following setings
+ # and commands are saved into the trasncript. Note that this cannot be
+ # a part of the `settings`, because this command itself won't be saved
+ # into the transcript.
+ self.runCmd("settings set interpreter.save-transcript true")
+
td = tempfile.TemporaryDirectory()
settings = [
diff --git a/lldb/test/API/functionalities/bt-interrupt/main.c b/lldb/test/API/functionalities/bt-interrupt/main.c
index bdaf423d334e..14a9eb6ffc85 100644
--- a/lldb/test/API/functionalities/bt-interrupt/main.c
+++ b/lldb/test/API/functionalities/bt-interrupt/main.c
@@ -12,6 +12,7 @@ struct Foo {
int
forgot_termination(int input, struct Foo my_foo) {
+ char frame_increasing_buffer[0x1000]; // To blow the stack sooner.
return forgot_termination(++input, my_foo);
}
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/Makefile b/lldb/test/API/lang/cpp/limit-debug-info/Makefile
index 30230b3469ac..cbb9690bcecb 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/Makefile
+++ b/lldb/test/API/lang/cpp/limit-debug-info/Makefile
@@ -1,5 +1,3 @@
CXX_SOURCES = main.cpp derived.cpp base.cpp
-CFLAGS_EXTRAS = $(LIMIT_DEBUG_INFO_FLAGS)
-
include Makefile.rules
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/TestWithLimitDebugInfo.py b/lldb/test/API/lang/cpp/limit-debug-info/TestWithLimitDebugInfo.py
index a4422cee91f0..ebd9e662c3d4 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/TestWithLimitDebugInfo.py
+++ b/lldb/test/API/lang/cpp/limit-debug-info/TestWithLimitDebugInfo.py
@@ -5,13 +5,8 @@ from lldbsuite.test import lldbutil
class TestWithLimitDebugInfo(TestBase):
- @add_test_categories(["dwarf", "dwo"])
- def test_limit_debug_info(self):
- self.build()
-
- src_file = os.path.join(self.getSourceDir(), "main.cpp")
- src_file_spec = lldb.SBFileSpec(src_file)
- self.assertTrue(src_file_spec.IsValid(), "breakpoint file")
+ def _run_test(self, build_dict):
+ self.build(dictionary=build_dict)
# Get the path of the executable
exe_path = self.getBuildArtifact("a.out")
@@ -21,9 +16,11 @@ class TestWithLimitDebugInfo(TestBase):
self.assertTrue(target.IsValid(), VALID_TARGET)
# Break on main function
- breakpoint = target.BreakpointCreateBySourceRegex("break here", src_file_spec)
- self.assertTrue(
- breakpoint.IsValid() and breakpoint.GetNumLocations() >= 1, VALID_BREAKPOINT
+ lldbutil.run_break_set_by_file_and_line(
+ self, "derived.h", line_number("derived.h", "// break1")
+ )
+ lldbutil.run_break_set_by_file_and_line(
+ self, "derived.h", line_number("derived.h", "// break2")
)
# Launch the process
@@ -32,14 +29,23 @@ class TestWithLimitDebugInfo(TestBase):
# Get the thread of the process
self.assertEqual(process.GetState(), lldb.eStateStopped, PROCESS_STOPPED)
- thread = lldbutil.get_stopped_thread(process, lldb.eStopReasonBreakpoint)
- thread.StepInto()
- # Get frame for current thread
- frame = thread.GetSelectedFrame()
+ self.expect_expr("1", result_type="int", result_value="1")
+ self.expect_expr("this", result_type="Foo *")
+ self.expect_expr("this->x", result_type="int", result_value="12345")
+
+ self.runCmd("continue")
self.expect_expr("1", result_type="int", result_value="1")
+ self.expect_expr("this", result_type="ns::Foo2 *")
+ self.expect_expr("this->x", result_type="int", result_value="23456")
- v2 = frame.EvaluateExpression("this")
- self.assertTrue(v2.IsValid(), "'expr this' results in a valid SBValue object")
- self.assertSuccess(v2.GetError(), "'expr this' succeeds without an error.")
+ @add_test_categories(["dwarf", "dwo"])
+ def test_default(self):
+ self._run_test(dict(CFLAGS_EXTRAS="$(LIMIT_DEBUG_INFO_FLAGS)"))
+
+ @add_test_categories(["dwarf", "dwo"])
+ def test_debug_names(self):
+ self._run_test(
+ dict(CFLAGS_EXTRAS="$(LIMIT_DEBUG_INFO_FLAGS) -gdwarf-5 -gpubnames")
+ )
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/base.cpp b/lldb/test/API/lang/cpp/limit-debug-info/base.cpp
index 296864488820..062eaa3c6f26 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/base.cpp
+++ b/lldb/test/API/lang/cpp/limit-debug-info/base.cpp
@@ -1,8 +1,7 @@
#include "base.h"
-FooNS::FooNS() : x(12345) {}
-
-void FooNS::bar() {
- x = 54321;
-}
+FooBase::FooBase() : x(12345) {}
+ns::Foo2Base::Foo2Base() : x(23456) {}
+void FooBase::bar() {}
+void ns::Foo2Base::bar() {}
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/base.h b/lldb/test/API/lang/cpp/limit-debug-info/base.h
index f4da76701c78..8e4dd17e7007 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/base.h
+++ b/lldb/test/API/lang/cpp/limit-debug-info/base.h
@@ -1,12 +1,22 @@
-class FooNS
-{
+class FooBase {
public:
- virtual void bar();
- virtual char baz() = 0;
+ virtual void bar();
protected:
- FooNS();
+ FooBase();
- int x;
+ int x;
};
+namespace ns {
+class Foo2Base {
+public:
+ virtual void bar();
+
+protected:
+ Foo2Base();
+
+ int x;
+};
+
+} // namespace ns
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/derived.cpp b/lldb/test/API/lang/cpp/limit-debug-info/derived.cpp
index 911fe3d9bc17..cbda8e706b52 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/derived.cpp
+++ b/lldb/test/API/lang/cpp/limit-debug-info/derived.cpp
@@ -1,11 +1,10 @@
#include "derived.h"
-Foo foo1;
-Foo foo2;
-
Foo::Foo() { a = 12345; }
+ns::Foo2::Foo2() { a = 23456; }
-char Foo::baz() {
- return (char)(x&0xff);
-}
+Foo foo1;
+Foo foo2;
+ns::Foo2 foo2_1;
+ns::Foo2 foo2_2;
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/derived.h b/lldb/test/API/lang/cpp/limit-debug-info/derived.h
index 8f95c52a595f..a4aab37d9dee 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/derived.h
+++ b/lldb/test/API/lang/cpp/limit-debug-info/derived.h
@@ -1,19 +1,36 @@
#include "base.h"
-class Foo : public FooNS
-{
+class Foo : public FooBase {
public:
- Foo();
+ Foo();
- // Deliberately defined by hand.
- Foo &operator=(const Foo &rhs) {
- a = rhs.a;
- return *this;
- }
+ // Deliberately defined by hand.
+ Foo &operator=(const Foo &rhs) {
+ x = rhs.x; // break1
+ a = rhs.a;
+ return *this;
+ }
+ int a;
+};
+
+namespace ns {
+class Foo2 : public Foo2Base {
+public:
+ Foo2();
- char baz() override;
- int a;
+ // Deliberately defined by hand.
+ Foo2 &operator=(const Foo2 &rhs) {
+ x = rhs.x; // break2
+ a = rhs.a;
+ return *this;
+ }
+
+ int a;
};
+} // namespace ns
extern Foo foo1;
extern Foo foo2;
+
+extern ns::Foo2 foo2_1;
+extern ns::Foo2 foo2_2;
diff --git a/lldb/test/API/lang/cpp/limit-debug-info/main.cpp b/lldb/test/API/lang/cpp/limit-debug-info/main.cpp
index 35cb0373ae39..405fc2cdd4c8 100644
--- a/lldb/test/API/lang/cpp/limit-debug-info/main.cpp
+++ b/lldb/test/API/lang/cpp/limit-debug-info/main.cpp
@@ -1,8 +1,6 @@
#include "derived.h"
int main() {
- foo1 = foo2; // break here
-
- foo1.bar();
- return foo1.baz();
+ foo1 = foo2;
+ foo2_1 = foo2_2;
}
diff --git a/lldb/test/API/python_api/debugger/TestDebuggerAPI.py b/lldb/test/API/python_api/debugger/TestDebuggerAPI.py
index 522de2466012..29b8cfadd947 100644
--- a/lldb/test/API/python_api/debugger/TestDebuggerAPI.py
+++ b/lldb/test/API/python_api/debugger/TestDebuggerAPI.py
@@ -161,3 +161,124 @@ class DebuggerAPITestCase(TestBase):
original_dbg_id = self.dbg.GetID()
self.dbg.Destroy(self.dbg)
self.assertEqual(destroy_dbg_id, original_dbg_id)
+
+ def test_AddDestroyCallback(self):
+ original_dbg_id = self.dbg.GetID()
+ called = []
+
+ def foo(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal called
+ called += [('foo', dbg_id)]
+
+ def bar(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal called
+ called += [('bar', dbg_id)]
+
+ token_foo = self.dbg.AddDestroyCallback(foo)
+ token_bar = self.dbg.AddDestroyCallback(bar)
+ self.dbg.Destroy(self.dbg)
+
+ # Should call both `foo()` and `bar()`.
+ self.assertEqual(called, [
+ ('foo', original_dbg_id),
+ ('bar', original_dbg_id),
+ ])
+
+ def test_RemoveDestroyCallback(self):
+ original_dbg_id = self.dbg.GetID()
+ called = []
+
+ def foo(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal called
+ called += [('foo', dbg_id)]
+
+ def bar(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal called
+ called += [('bar', dbg_id)]
+
+ token_foo = self.dbg.AddDestroyCallback(foo)
+ token_bar = self.dbg.AddDestroyCallback(bar)
+ ret = self.dbg.RemoveDestroyCallback(token_foo)
+ self.dbg.Destroy(self.dbg)
+
+ # `Remove` should be successful
+ self.assertTrue(ret)
+ # Should only call `bar()`
+ self.assertEqual(called, [('bar', original_dbg_id)])
+
+ def test_RemoveDestroyCallback_invalid_token(self):
+ original_dbg_id = self.dbg.GetID()
+ magic_token_that_should_not_exist = 32413
+ called = []
+
+ def foo(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal called
+ called += [('foo', dbg_id)]
+
+ token_foo = self.dbg.AddDestroyCallback(foo)
+ ret = self.dbg.RemoveDestroyCallback(magic_token_that_should_not_exist)
+ self.dbg.Destroy(self.dbg)
+
+ # `Remove` should be unsuccessful
+ self.assertFalse(ret)
+ # Should call `foo()`
+ self.assertEqual(called, [('foo', original_dbg_id)])
+
+ def test_HandleDestroyCallback(self):
+ """
+ Validates:
+ 1. AddDestroyCallback and RemoveDestroyCallback work during debugger destroy.
+ 2. HandleDestroyCallback invokes all callbacks in FIFO order.
+ """
+ original_dbg_id = self.dbg.GetID()
+ events = []
+ bar_token = None
+
+ def foo(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal events
+ events.append(('foo called', dbg_id))
+
+ def bar(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal events
+ events.append(('bar called', dbg_id))
+
+ def add_foo(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal events
+ events.append(('add_foo called', dbg_id))
+ events.append(('foo token', self.dbg.AddDestroyCallback(foo)))
+
+ def remove_bar(dbg_id):
+ # Need nonlocal to modify closure variable.
+ nonlocal events
+ events.append(('remove_bar called', dbg_id))
+ events.append(('remove bar ret', self.dbg.RemoveDestroyCallback(bar_token)))
+
+ # Setup
+ events.append(('add_foo token', self.dbg.AddDestroyCallback(add_foo)))
+ bar_token = self.dbg.AddDestroyCallback(bar)
+ events.append(('bar token', bar_token))
+ events.append(('remove_bar token', self.dbg.AddDestroyCallback(remove_bar)))
+ # Destroy
+ self.dbg.Destroy(self.dbg)
+
+ self.assertEqual(events, [
+ # Setup
+ ('add_foo token', 0), # add_foo should be added
+ ('bar token', 1), # bar should be added
+ ('remove_bar token', 2), # remove_bar should be added
+ # Destroy
+ ('add_foo called', original_dbg_id), # add_foo should be called
+ ('foo token', 3), # foo should be added
+ ('bar called', original_dbg_id), # bar should be called
+ ('remove_bar called', original_dbg_id), # remove_bar should be called
+ ('remove bar ret', False), # remove_bar should fail, because it's already invoked and removed
+ ('foo called', original_dbg_id), # foo should be called
+ ])
diff --git a/lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py b/lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py
index 8f9fbfc255bb..95643eef0d34 100644
--- a/lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py
+++ b/lldb/test/API/python_api/interpreter/TestCommandInterpreterAPI.py
@@ -1,5 +1,6 @@
"""Test the SBCommandInterpreter APIs."""
+import json
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
@@ -15,8 +16,7 @@ class CommandInterpreterAPICase(TestBase):
# Find the line number to break on inside main.cpp.
self.line = line_number("main.c", "Hello world.")
- def test_with_process_launch_api(self):
- """Test the SBCommandInterpreter APIs."""
+ def buildAndCreateTarget(self):
self.build()
exe = self.getBuildArtifact("a.out")
@@ -27,6 +27,11 @@ class CommandInterpreterAPICase(TestBase):
# Retrieve the associated command interpreter from our debugger.
ci = self.dbg.GetCommandInterpreter()
self.assertTrue(ci, VALID_COMMAND_INTERPRETER)
+ return ci
+
+ def test_with_process_launch_api(self):
+ """Test the SBCommandInterpreter APIs."""
+ ci = self.buildAndCreateTarget()
# Exercise some APIs....
@@ -85,3 +90,166 @@ class CommandInterpreterAPICase(TestBase):
self.assertEqual(res.GetOutput(), "")
self.assertIsNotNone(res.GetError())
self.assertEqual(res.GetError(), "")
+
+ def getTranscriptAsPythonObject(self, ci):
+ """Retrieve the transcript and convert it into a Python object"""
+ structured_data = ci.GetTranscript()
+ self.assertTrue(structured_data.IsValid())
+
+ stream = lldb.SBStream()
+ self.assertTrue(stream)
+
+ error = structured_data.GetAsJSON(stream)
+ self.assertSuccess(error)
+
+ return json.loads(stream.GetData())
+
+ def test_structured_transcript(self):
+ """Test structured transcript generation and retrieval."""
+ ci = self.buildAndCreateTarget()
+
+ # Make sure the "save-transcript" setting is on
+ self.runCmd("settings set interpreter.save-transcript true")
+
+ # Send a few commands through the command interpreter.
+ #
+ # Using `ci.HandleCommand` because some commands will fail so that we
+ # can test the "error" field in the saved transcript.
+ res = lldb.SBCommandReturnObject()
+ ci.HandleCommand("version", res)
+ ci.HandleCommand("an-unknown-command", res)
+ ci.HandleCommand("breakpoint set -f main.c -l %d" % self.line, res)
+ ci.HandleCommand("r", res)
+ ci.HandleCommand("p a", res)
+ ci.HandleCommand("statistics dump", res)
+ total_number_of_commands = 6
+
+ # Get transcript as python object
+ transcript = self.getTranscriptAsPythonObject(ci)
+
+ # All commands should have expected fields.
+ for command in transcript:
+ self.assertIn("command", command)
+ self.assertIn("output", command)
+ self.assertIn("error", command)
+ self.assertIn("seconds", command)
+
+ # The following validates individual commands in the transcript.
+ #
+ # Notes:
+ # 1. Some of the asserts rely on the exact output format of the
+ # commands. Hopefully we are not changing them any time soon.
+ # 2. We are removing the "seconds" field from each command, so that
+ # some of the validations below can be easier / more readable.
+ for command in transcript:
+ del command["seconds"]
+
+ # (lldb) version
+ self.assertEqual(transcript[0]["command"], "version")
+ self.assertIn("lldb version", transcript[0]["output"])
+ self.assertEqual(transcript[0]["error"], "")
+
+ # (lldb) an-unknown-command
+ self.assertEqual(transcript[1],
+ {
+ "command": "an-unknown-command",
+ "output": "",
+ "error": "error: 'an-unknown-command' is not a valid command.\n",
+ })
+
+ # (lldb) breakpoint set -f main.c -l <line>
+ self.assertEqual(transcript[2]["command"], "breakpoint set -f main.c -l %d" % self.line)
+ # Breakpoint 1: where = a.out`main + 29 at main.c:5:3, address = 0x0000000100000f7d
+ self.assertIn("Breakpoint 1: where = a.out`main ", transcript[2]["output"])
+ self.assertEqual(transcript[2]["error"], "")
+
+ # (lldb) r
+ self.assertEqual(transcript[3]["command"], "r")
+ # Process 25494 launched: '<path>/TestCommandInterpreterAPI.test_structured_transcript/a.out' (x86_64)
+ self.assertIn("Process", transcript[3]["output"])
+ self.assertIn("launched", transcript[3]["output"])
+ self.assertEqual(transcript[3]["error"], "")
+
+ # (lldb) p a
+ self.assertEqual(transcript[4],
+ {
+ "command": "p a",
+ "output": "(int) 123\n",
+ "error": "",
+ })
+
+ # (lldb) statistics dump
+ statistics_dump = json.loads(transcript[5]["output"])
+ # Dump result should parse as a JSON object
+ self.assertIsInstance(statistics_dump, dict)
+ # Dump result should contain expected fields
+ self.assertIn("commands", statistics_dump)
+ self.assertIn("memory", statistics_dump)
+ self.assertIn("modules", statistics_dump)
+ self.assertIn("targets", statistics_dump)
+
+ def test_save_transcript_setting_default(self):
+ ci = self.buildAndCreateTarget()
+ res = lldb.SBCommandReturnObject()
+
+ # The setting's default value should be "false"
+ self.runCmd("settings show interpreter.save-transcript", "interpreter.save-transcript (boolean) = false\n")
+ # self.assertEqual(res.GetOutput(), )
+
+ def test_save_transcript_setting_off(self):
+ ci = self.buildAndCreateTarget()
+
+ # Make sure the setting is off
+ self.runCmd("settings set interpreter.save-transcript false")
+
+ # The transcript should be empty after running a command
+ self.runCmd("version")
+ transcript = self.getTranscriptAsPythonObject(ci)
+ self.assertEqual(transcript, [])
+
+ def test_save_transcript_setting_on(self):
+ ci = self.buildAndCreateTarget()
+ res = lldb.SBCommandReturnObject()
+
+ # Make sure the setting is on
+ self.runCmd("settings set interpreter.save-transcript true")
+
+ # The transcript should contain one item after running a command
+ self.runCmd("version")
+ transcript = self.getTranscriptAsPythonObject(ci)
+ self.assertEqual(len(transcript), 1)
+ self.assertEqual(transcript[0]["command"], "version")
+
+ def test_save_transcript_returns_copy(self):
+ """
+ Test that the returned structured data is *at least* a shallow copy.
+
+ We believe that a deep copy *is* performed in `SBCommandInterpreter::GetTranscript`.
+ However, the deep copy cannot be tested and doesn't need to be tested,
+ because there is no logic in the command interpreter to modify a
+ transcript item (representing a command) after it has been returned.
+ """
+ ci = self.buildAndCreateTarget()
+
+ # Make sure the setting is on
+ self.runCmd("settings set interpreter.save-transcript true")
+
+ # Run commands and get the transcript as structured data
+ self.runCmd("version")
+ structured_data_1 = ci.GetTranscript()
+ self.assertTrue(structured_data_1.IsValid())
+ self.assertEqual(structured_data_1.GetSize(), 1)
+ self.assertEqual(structured_data_1.GetItemAtIndex(0).GetValueForKey("command").GetStringValue(100), "version")
+
+ # Run some more commands and get the transcript as structured data again
+ self.runCmd("help")
+ structured_data_2 = ci.GetTranscript()
+ self.assertTrue(structured_data_2.IsValid())
+ self.assertEqual(structured_data_2.GetSize(), 2)
+ self.assertEqual(structured_data_2.GetItemAtIndex(0).GetValueForKey("command").GetStringValue(100), "version")
+ self.assertEqual(structured_data_2.GetItemAtIndex(1).GetValueForKey("command").GetStringValue(100), "help")
+
+ # Now, the first structured data should remain unchanged
+ self.assertTrue(structured_data_1.IsValid())
+ self.assertEqual(structured_data_1.GetSize(), 1)
+ self.assertEqual(structured_data_1.GetItemAtIndex(0).GetValueForKey("command").GetStringValue(100), "version")
diff --git a/lldb/test/API/python_api/interpreter/main.c b/lldb/test/API/python_api/interpreter/main.c
index 277aa54a4eea..366ffde5fdef 100644
--- a/lldb/test/API/python_api/interpreter/main.c
+++ b/lldb/test/API/python_api/interpreter/main.c
@@ -1,6 +1,7 @@
#include <stdio.h>
int main(int argc, char const *argv[]) {
- printf("Hello world.\n");
- return 0;
+ int a = 123;
+ printf("Hello world.\n");
+ return 0;
}
diff --git a/lldb/test/API/tools/lldb-dap/evaluate/TestDAP_evaluate.py b/lldb/test/API/tools/lldb-dap/evaluate/TestDAP_evaluate.py
index 57cabf5b7f41..68c57ad77554 100644
--- a/lldb/test/API/tools/lldb-dap/evaluate/TestDAP_evaluate.py
+++ b/lldb/test/API/tools/lldb-dap/evaluate/TestDAP_evaluate.py
@@ -27,7 +27,7 @@ class TestDAP_evaluate(lldbdap_testcase.DAPTestCaseBase):
)
def isResultExpandedDescription(self):
- return self.context == "repl" or self.context == "hover"
+ return self.context == "repl"
def isExpressionParsedExpected(self):
return self.context != "hover"
diff --git a/lldb/test/API/tools/lldb-dap/variables/TestDAP_variables.py b/lldb/test/API/tools/lldb-dap/variables/TestDAP_variables.py
index 07ab6d5a63eb..57c17e5ea9d3 100644
--- a/lldb/test/API/tools/lldb-dap/variables/TestDAP_variables.py
+++ b/lldb/test/API/tools/lldb-dap/variables/TestDAP_variables.py
@@ -502,29 +502,12 @@ class TestDAP_variables(lldbdap_testcase.DAPTestCaseBase):
},
"hover": {
"equals": {"type": "PointType"},
- "equals": {
- "result": """(PointType) pt = {
- x = 11
- y = 22
- buffer = {
- [0] = 0
- [1] = 1
- [2] = 2
- [3] = 3
- [4] = 4
- [5] = 5
- [6] = 6
- [7] = 7
- [8] = 8
- [9] = 9
- [10] = 10
- [11] = 11
- [12] = 12
- [13] = 13
- [14] = 14
- [15] = 15
- }
-}"""
+ "startswith": {
+ "result": (
+ "{x:11, y:22, buffer:{...}}"
+ if enableAutoVariableSummaries
+ else "PointType @ 0x"
+ )
},
"missing": ["indexedVariables"],
"hasVariablesReference": True,
diff --git a/lldb/test/Shell/Unwind/Inputs/signal-in-leaf-function-aarch64.c b/lldb/test/Shell/Unwind/Inputs/signal-in-leaf-function-aarch64.c
new file mode 100644
index 000000000000..fe020affcad0
--- /dev/null
+++ b/lldb/test/Shell/Unwind/Inputs/signal-in-leaf-function-aarch64.c
@@ -0,0 +1,15 @@
+#include <signal.h>
+#include <unistd.h>
+
+int __attribute__((naked)) signal_generating_add(int a, int b) {
+ asm("add w0, w1, w0\n\t"
+ "udf #0xdead\n\t"
+ "ret");
+}
+
+void sigill_handler(int signo) { _exit(0); }
+
+int main() {
+ signal(SIGILL, sigill_handler);
+ return signal_generating_add(42, 47);
+}
diff --git a/lldb/test/Shell/Unwind/signal-in-leaf-function-aarch64.test b/lldb/test/Shell/Unwind/signal-in-leaf-function-aarch64.test
new file mode 100644
index 000000000000..2ac2d4a75078
--- /dev/null
+++ b/lldb/test/Shell/Unwind/signal-in-leaf-function-aarch64.test
@@ -0,0 +1,30 @@
+# REQUIRES: target-aarch64 && native
+# UNSUPPORTED: system-windows
+# llvm.org/pr91610, rdar://128031075
+# XFAIL: system-darwin
+
+
+# RUN: %clang_host %S/Inputs/signal-in-leaf-function-aarch64.c -o %t
+# RUN: %lldb -s %s -o exit %t | FileCheck %s
+
+# Convert EXC_BAD_INSTRUCTION to SIGILL on darwin
+settings set platform.plugin.darwin.ignored-exceptions EXC_BAD_INSTRUCTION
+
+breakpoint set -n sigill_handler
+# CHECK: Breakpoint 1: where = {{.*}}`sigill_handler
+
+run
+# CHECK: thread #1, {{.*}} stop reason = signal SIGILL
+
+thread backtrace
+# CHECK: frame #0: [[ADD:0x[0-9a-fA-F]*]] {{.*}}`signal_generating_add
+# CHECK: frame #1: [[MAIN:0x[0-9a-fA-F]*]] {{.*}}`main
+
+continue
+# CHECK: thread #1, {{.*}} stop reason = breakpoint 1
+
+thread backtrace
+# CHECK: frame #0: {{.*}}`sigill_handler
+# Unknown number of signal trampoline frames
+# CHECK: frame #{{[0-9]+}}: [[ADD]] {{.*}}`signal_generating_add
+# CHECK: frame #{{[0-9]+}}: [[MAIN]] {{.*}}`main
diff --git a/lldb/tools/lldb-dap/JSONUtils.cpp b/lldb/tools/lldb-dap/JSONUtils.cpp
index bec277332bcf..069877dbab33 100644
--- a/lldb/tools/lldb-dap/JSONUtils.cpp
+++ b/lldb/tools/lldb-dap/JSONUtils.cpp
@@ -1065,9 +1065,9 @@ llvm::json::Object VariableDescription::GetVariableExtensionsJSON() {
}
std::string VariableDescription::GetResult(llvm::StringRef context) {
- // In repl and hover context, the results can be displayed as multiple lines
- // so more detailed descriptions can be returned.
- if (context != "repl" && context != "hover")
+ // In repl context, the results can be displayed as multiple lines so more
+ // detailed descriptions can be returned.
+ if (context != "repl")
return display_value;
if (!v.IsValid())
diff --git a/lldb/unittests/SymbolFile/DWARF/DWARFDIETest.cpp b/lldb/unittests/SymbolFile/DWARF/DWARFDIETest.cpp
index bcb211815f9f..20742ea51230 100644
--- a/lldb/unittests/SymbolFile/DWARF/DWARFDIETest.cpp
+++ b/lldb/unittests/SymbolFile/DWARF/DWARFDIETest.cpp
@@ -9,6 +9,7 @@
#include "Plugins/SymbolFile/DWARF/DWARFDIE.h"
#include "Plugins/SymbolFile/DWARF/DWARFDebugInfo.h"
#include "TestingSupport/Symbol/YAMLModuleTester.h"
+#include "lldb/Core/dwarf.h"
#include "llvm/ADT/STLExtras.h"
#include "gmock/gmock.h"
#include "gtest/gtest.h"
@@ -169,21 +170,20 @@ DWARF:
YAMLModuleTester t(yamldata);
auto *symbol_file =
llvm::cast<SymbolFileDWARF>(t.GetModule()->GetSymbolFile());
- auto &debug_info = symbol_file->DebugInfo();
+ DWARFUnit *unit = symbol_file->DebugInfo().GetUnitAtIndex(0);
- DIERef first_die(std::nullopt, DIERef::Section::DebugInfo,
- 11 /*FirstDIEOffset*/);
- EXPECT_EQ(debug_info.PeekDIEName(first_die), "");
+ dw_offset_t first_die_offset = 11;
+ EXPECT_EQ(unit->PeekDIEName(first_die_offset), "");
- DIERef second_die(std::nullopt, DIERef::Section::DebugInfo, 14);
- EXPECT_EQ(debug_info.PeekDIEName(second_die), "NameType1");
+ dw_offset_t second_die_offset = 14;
+ EXPECT_EQ(unit->PeekDIEName(second_die_offset), "NameType1");
- DIERef third_die(std::nullopt, DIERef::Section::DebugInfo, 19);
- EXPECT_EQ(debug_info.PeekDIEName(third_die), "NameType2");
+ dw_offset_t third_die_offset = 19;
+ EXPECT_EQ(unit->PeekDIEName(third_die_offset), "NameType2");
- DIERef fourth_die(std::nullopt, DIERef::Section::DebugInfo, 24);
- EXPECT_EQ(debug_info.PeekDIEName(fourth_die), "NameType1");
+ dw_offset_t fourth_die_offset = 24;
+ EXPECT_EQ(unit->PeekDIEName(fourth_die_offset), "NameType1");
- DIERef fifth_die(std::nullopt, DIERef::Section::DebugInfo, 26);
- EXPECT_EQ(debug_info.PeekDIEName(fifth_die), "NameType2");
+ dw_offset_t fifth_die_offset = 26;
+ EXPECT_EQ(unit->PeekDIEName(fifth_die_offset), "NameType2");
}
diff --git a/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp b/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp
index 80abeb8fae9e..9303d6f5f3c6 100644
--- a/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp
+++ b/lldb/unittests/UnwindAssembly/ARM64/TestArm64InstEmulation.cpp
@@ -77,7 +77,7 @@ TEST_F(TestArm64InstEmulation, TestSimpleDarwinFunction) {
// UnwindPlan we expect:
- // row[0]: 0: CFA=sp +0 =>
+ // row[0]: 0: CFA=sp +0 => fp= <same> lr= <same>
// row[1]: 4: CFA=sp+16 => fp=[CFA-16] lr=[CFA-8]
// row[2]: 8: CFA=fp+16 => fp=[CFA-16] lr=[CFA-8]
// row[2]: 16: CFA=sp+16 => fp=[CFA-16] lr=[CFA-8]
@@ -88,13 +88,19 @@ TEST_F(TestArm64InstEmulation, TestSimpleDarwinFunction) {
EXPECT_TRUE(engine->GetNonCallSiteUnwindPlanFromAssembly(
sample_range, data, sizeof(data), unwind_plan));
- // CFA=sp +0
+ // CFA=sp +0 => fp= <same> lr= <same>
row_sp = unwind_plan.GetRowForFunctionOffset(0);
EXPECT_EQ(0ull, row_sp->GetOffset());
EXPECT_TRUE(row_sp->GetCFAValue().GetRegisterNumber() == gpr_sp_arm64);
EXPECT_TRUE(row_sp->GetCFAValue().IsRegisterPlusOffset() == true);
EXPECT_EQ(0, row_sp->GetCFAValue().GetOffset());
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_fp_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
+
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_lr_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
+
// CFA=sp+16 => fp=[CFA-16] lr=[CFA-8]
row_sp = unwind_plan.GetRowForFunctionOffset(4);
EXPECT_EQ(4ull, row_sp->GetOffset());
@@ -146,6 +152,12 @@ TEST_F(TestArm64InstEmulation, TestSimpleDarwinFunction) {
EXPECT_TRUE(row_sp->GetCFAValue().GetRegisterNumber() == gpr_sp_arm64);
EXPECT_TRUE(row_sp->GetCFAValue().IsRegisterPlusOffset() == true);
EXPECT_EQ(0, row_sp->GetCFAValue().GetOffset());
+
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_fp_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
+
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_lr_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
}
TEST_F(TestArm64InstEmulation, TestMediumDarwinFunction) {
@@ -381,8 +393,12 @@ TEST_F(TestArm64InstEmulation, TestFramelessThreeEpilogueFunction) {
EXPECT_FALSE(row_sp->GetRegisterInfo(gpr_x26_arm64, regloc));
EXPECT_FALSE(row_sp->GetRegisterInfo(gpr_x27_arm64, regloc));
EXPECT_FALSE(row_sp->GetRegisterInfo(gpr_x28_arm64, regloc));
- EXPECT_FALSE(row_sp->GetRegisterInfo(gpr_fp_arm64, regloc));
- EXPECT_FALSE(row_sp->GetRegisterInfo(gpr_lr_arm64, regloc));
+
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_fp_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
+
+ EXPECT_TRUE(row_sp->GetRegisterInfo(gpr_lr_arm64, regloc));
+ EXPECT_TRUE(regloc.IsSame());
row_sp = unwind_plan.GetRowForFunctionOffset(36);
EXPECT_TRUE(row_sp->GetCFAValue().GetRegisterNumber() == gpr_sp_arm64);
diff --git a/llvm/cmake/modules/HandleLLVMOptions.cmake b/llvm/cmake/modules/HandleLLVMOptions.cmake
index 185266c0861e..d16641d83190 100644
--- a/llvm/cmake/modules/HandleLLVMOptions.cmake
+++ b/llvm/cmake/modules/HandleLLVMOptions.cmake
@@ -158,7 +158,7 @@ if(LLVM_ENABLE_EXPENSIVE_CHECKS)
endif()
add_compile_definitions(EXPENSIVE_CHECKS)
- # In some libstdc++ versions, std::min_element is not constexpr when
+ # In libstdc++ 9 and earlier, std::min_element is not constexpr when
# _GLIBCXX_DEBUG is enabled.
CHECK_CXX_SOURCE_COMPILES("
#define _GLIBCXX_DEBUG
diff --git a/llvm/docs/AMDGPUUsage.rst b/llvm/docs/AMDGPUUsage.rst
index 75536bc5bea6..1004956ac8f1 100644
--- a/llvm/docs/AMDGPUUsage.rst
+++ b/llvm/docs/AMDGPUUsage.rst
@@ -1970,6 +1970,8 @@ The AMDGPU backend uses the following ELF header:
``EF_AMDGPU_MACH_AMDGCN_GFX10_3_GENERIC`` 0x053 ``gfx10-3-generic``
``EF_AMDGPU_MACH_AMDGCN_GFX11_GENERIC`` 0x054 ``gfx11-generic``
*reserved* 0x055 Reserved.
+ *reserved* 0x056 Reserved.
+ *reserved* 0x057 Reserved.
========================================== ========== =============================
Sections
diff --git a/llvm/docs/DeveloperPolicy.rst b/llvm/docs/DeveloperPolicy.rst
index 5d3731d761a3..f2ac46e6c04f 100644
--- a/llvm/docs/DeveloperPolicy.rst
+++ b/llvm/docs/DeveloperPolicy.rst
@@ -1069,7 +1069,7 @@ In certain circumstances, code licensed under other licenses can be added
to the codebase. However, this may only be done with approval of the LLVM
Foundation Board of Directors, and contributors should plan for the approval
process to take at least 4-6 weeks. If you would like to contribute code
-under a different license, please create a Phabricator review with the code
+under a different license, please create a pull request with the code
you want to contribute and email board@llvm.org requesting a review.
If you have questions or comments about these topics, please ask on the
@@ -1124,20 +1124,17 @@ To relicense LLVM, we will be seeking approval from all of the copyright holders
of code in the repository, or potentially remove/rewrite code if we cannot.
This is a large
and challenging project which will take a significant amount of time to
-complete. In the interim, **all contributions to the project will be made under
-the terms of both the new license and the legacy license scheme** (each of which
-is described below). The exception to this is the legacy patent grant, which
-will not be required for new contributions.
+complete.
-When all of the code in the project has been converted to the new license or
-removed, we will drop the requirement to contribute under the legacy license.
-This will achieve the goal of having
-a single standardized license for the entire codebase.
+Starting on 2024-06-01 (first of June 2024), new contributions only need to
+be covered by the new LLVM license, i.e. Apache-2.0 WITH LLVM-exception.
+Before this date, the project required all contributions to be made under
+both the new license and the legacy license.
-If you are a prior contributor to LLVM and have not done so already, please do
-*TODO* to allow us to use your code. *Add a link to a separate page here, which
-is probably a click through web form or something like that. Details to be
-determined later*.
+If you are a contributor to LLVM with contributions committed before 2019-01-19
+and have not done so already, please do follow the instructions at
+https://foundation.llvm.org/docs/relicensing/, under section "Individual
+Relicensing Agreement" to relicense your contributions under the new license.
.. _open source licensing terms:
@@ -1264,12 +1261,11 @@ Legacy License Structure
.. note::
The code base was previously licensed under the Terms described here.
- We are in the middle of relicensing to a new approach (described above), but
- until this effort is complete, the code is also still available under these
- terms. Once we finish the relicensing project, new versions of the code will
- not be available under these terms. However, nothing takes away your right
- to use old versions under the licensing terms under which they were
- originally released.
+ We are in the middle of relicensing to a new approach (described above).
+ More than 99% of all contributions made to LLVM are covered by the Apache-2.0
+ WITH LLVM-exception license. A small portion of LLVM code remains exclusively
+ covered by the legacy license. Contributions after 2024-06-01 are covered
+ exclusively by the new license.
We intend to keep LLVM perpetually open source and to use a permissive open
source license. The code in
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index e2f4d8bfcaee..358eb4b86792 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -290,13 +290,17 @@ linkage:
symbol is weak until linked, if not linked, the symbol becomes null
instead of being an undefined reference.
``linkonce_odr``, ``weak_odr``
- Some languages allow differing globals to be merged, such as two
- functions with different semantics. Other languages, such as
- ``C++``, ensure that only equivalent globals are ever merged (the
- "one definition rule" --- "ODR"). Such languages can use the
- ``linkonce_odr`` and ``weak_odr`` linkage types to indicate that the
- global will only be merged with equivalent globals. These linkage
- types are otherwise the same as their non-``odr`` versions.
+ The ``odr`` suffix indicates that all globals defined with the given name
+ are equivalent, along the lines of the C++ "one definition rule" ("ODR").
+ Informally, this means we can inline functions and fold loads of constants.
+
+ Formally, use the following definition: when an ``odr`` function is
+ called, one of the definitions is non-deterministically chosen to run. For
+ ``odr`` variables, if any byte in the value is not equal in all
+ initializers, that byte is a :ref:`poison value <poisonvalues>`. For
+ aliases and ifuncs, apply the rule for the underlying function or variable.
+
+ These linkage types are otherwise the same as their non-``odr`` versions.
``external``
If none of the above identifiers are used, the global is externally
visible, meaning that it participates in linkage and can be used to
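As a rough illustration of the ``odr`` guarantee described above (the header and function names here are illustrative, not part of this patch): a C++ ``inline`` function defined in a header is typically emitted as ``linkonce_odr`` in every translation unit that uses it, and because all copies must be equivalent, the optimizer may inline against any one of them.

.. code-block:: c++

   // answer.h -- included by several translation units. Each TU emits an
   // identical body, which Clang lowers to something like
   //   define linkonce_odr i32 @_Z6answerv() { ret i32 42 }
   // so the linker may keep any single copy and calls may be inlined
   // against it.
   inline int answer() { return 42; }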
diff --git a/llvm/docs/MemorySSA.rst b/llvm/docs/MemorySSA.rst
index 17d2c9af96c2..09e9f9a37732 100644
--- a/llvm/docs/MemorySSA.rst
+++ b/llvm/docs/MemorySSA.rst
@@ -295,9 +295,9 @@ A code snippet for such a walk looks like this:
.. code-block:: c++
MemoryDef *Def; // find who's optimized or defining for this MemoryDef
- for (auto& U : Def->uses()) {
- MemoryAccess *MA = cast<MemoryAccess>(Use.getUser());
- if (auto *DefUser = cast_of_null<MemoryDef>MA)
+ for (auto &U : Def->uses()) {
+ MemoryAccess *MA = cast<MemoryAccess>(U.getUser());
+ if (auto *DefUser = dyn_cast<MemoryDef>(MA))
if (DefUser->isOptimized() && DefUser->getOptimized() == Def) {
// User who is optimized to Def
} else {
@@ -312,19 +312,18 @@ the store.
.. code-block:: c++
checkUses(MemoryAccess *Def) { // Def can be a MemoryDef or a MemoryPhi.
- for (auto& U : Def->uses()) {
- MemoryAccess *MA = cast<MemoryAccess>(Use.getUser());
- if (auto *MU = cast_of_null<MemoryUse>MA) {
+ for (auto &U : Def->uses()) {
+ MemoryAccess *MA = cast<MemoryAccess>(U.getUser());
+ if (auto *MU = dyn_cast<MemoryUse>(MA)) {
// Process MemoryUse as needed.
- }
- else {
+ } else {
// Process MemoryDef or MemoryPhi as needed.
// As a user can come up twice, as an optimized access and defining
// access, keep a visited list.
// Check transitive uses as needed
- checkUses (MA); // use a worklist for an iterative algorithm
+ checkUses(MA); // use a worklist for an iterative algorithm
}
}
}
diff --git a/llvm/docs/RISCVUsage.rst b/llvm/docs/RISCVUsage.rst
index ff08c9d345d5..5ecee2a480f7 100644
--- a/llvm/docs/RISCVUsage.rst
+++ b/llvm/docs/RISCVUsage.rst
@@ -119,7 +119,9 @@ on support follow.
``V`` Supported
``Za128rs`` Supported (`See note <#riscv-profiles-extensions-note>`__)
``Za64rs`` Supported (`See note <#riscv-profiles-extensions-note>`__)
+ ``Zaamo`` Assembly Support
``Zacas`` Supported (`See note <#riscv-zacas-note>`__)
+ ``Zalrsc`` Assembly Support
``Zama16b`` Supported (`See note <#riscv-profiles-extensions-note>`__)
``Zawrs`` Assembly Support
``Zba`` Supported
@@ -275,9 +277,6 @@ The primary goal of experimental support is to assist in the process of ratifica
``experimental-ztso``
LLVM implements the `v0.1 proposed specification <https://github.com/riscv/riscv-isa-manual/releases/download/draft-20220723-10eea63/riscv-spec.pdf>`__ (see Chapter 25). The mapping from the C/C++ memory model to Ztso has not yet been ratified in any standards document. There are multiple possible mappings, and they are *not* mutually ABI compatible. The mapping LLVM implements is ABI compatible with the default WMO mapping. This mapping may change and there is *explicitly* no ABI stability offered while the extension remains in experimental status. User beware.
-``experimental-zaamo``, ``experimental-zalrsc``
- LLVM implements the `v0.2 proposed specification <https://github.com/riscv/riscv-zaamo-zalrsc/releases/tag/v0.2>`__.
-
To use an experimental extension from `clang`, you must add `-menable-experimental-extensions` to the command line, and specify the exact version of the experimental extension you are using. To use an experimental extension with LLVM's internal developer tools (e.g. `llc`, `llvm-objdump`, `llvm-mc`), you must prefix the extension name with `experimental-`. Note that you don't need to specify the version with internal tools, and shouldn't include the `experimental-` prefix with `clang`.
Vendor Extensions
diff --git a/llvm/docs/ReleaseNotes.rst b/llvm/docs/ReleaseNotes.rst
index 84320461fa9e..cba36c7177da 100644
--- a/llvm/docs/ReleaseNotes.rst
+++ b/llvm/docs/ReleaseNotes.rst
@@ -129,6 +129,7 @@ Changes to the RISC-V Backend
* llvm-objdump now prints disassembled opcode bytes in groups of 2 or 4 bytes to
match GNU objdump. The bytes within the groups are in big endian order.
* Added smstateen extension to -march. CSR names for smstateen were already supported.
+* Zaamo and Zalrsc are no longer experimental.
Changes to the WebAssembly Backend
----------------------------------
diff --git a/llvm/include/llvm/Analysis/CFG.h b/llvm/include/llvm/Analysis/CFG.h
index 86b01c13274f..23bc10a4a9d1 100644
--- a/llvm/include/llvm/Analysis/CFG.h
+++ b/llvm/include/llvm/Analysis/CFG.h
@@ -96,6 +96,18 @@ bool isPotentiallyReachableFromMany(
const SmallPtrSetImpl<BasicBlock *> *ExclusionSet,
const DominatorTree *DT = nullptr, const LoopInfo *LI = nullptr);
+/// Determine whether there is potentially a path from at least one block in
+/// 'Worklist' to at least one block in 'StopSet' within a single function
+/// without passing through any of the blocks in 'ExclusionSet'. Returns false
+/// only if we can prove that once any block in 'Worklist' has been reached then
+/// no blocks in 'StopSet' can be executed without passing through any blocks in
+/// 'ExclusionSet'. Conservatively returns true.
+bool isManyPotentiallyReachableFromMany(
+ SmallVectorImpl<BasicBlock *> &Worklist,
+ const SmallPtrSetImpl<const BasicBlock *> &StopSet,
+ const SmallPtrSetImpl<BasicBlock *> *ExclusionSet,
+ const DominatorTree *DT = nullptr, const LoopInfo *LI = nullptr);
+
/// Return true if the control flow in \p RPOTraversal is irreducible.
///
/// This is a generic implementation to detect CFG irreducibility based on loop
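A hedged usage sketch for the new entry point (the wrapper name ``mayAnyReachAny`` and its parameters are illustrative, not part of the patch): the query answers "can any block in the worklist reach any block in the stop set without passing through an excluded block?", and ``true`` is the conservative answer.

.. code-block:: c++

   #include "llvm/ADT/ArrayRef.h"
   #include "llvm/ADT/SmallPtrSet.h"
   #include "llvm/ADT/SmallVector.h"
   #include "llvm/Analysis/CFG.h"

   using namespace llvm;

   // Conservative query: may any block in Starts reach any block in Stops
   // while avoiding every block in Excluded?
   static bool mayAnyReachAny(ArrayRef<BasicBlock *> Starts,
                              ArrayRef<const BasicBlock *> Stops,
                              const SmallPtrSetImpl<BasicBlock *> *Excluded,
                              const DominatorTree *DT, const LoopInfo *LI) {
     SmallVector<BasicBlock *, 8> Worklist(Starts.begin(), Starts.end());
     SmallPtrSet<const BasicBlock *, 8> StopSet(Stops.begin(), Stops.end());
     return isManyPotentiallyReachableFromMany(Worklist, StopSet, Excluded,
                                               DT, LI);
   }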
diff --git a/llvm/include/llvm/Analysis/InstSimplifyFolder.h b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
index 8a3269d6add0..98c7c291fea1 100644
--- a/llvm/include/llvm/Analysis/InstSimplifyFolder.h
+++ b/llvm/include/llvm/Analysis/InstSimplifyFolder.h
@@ -72,8 +72,8 @@ public:
return simplifyUnOp(Opc, V, FMF, SQ);
}
- Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
- return simplifyICmpInst(P, LHS, RHS, SQ);
+ Value *FoldCmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ return simplifyCmpInst(P, LHS, RHS, SQ);
}
Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
@@ -139,15 +139,6 @@ public:
return C; // avoid calling Fold
return ConstFolder.CreatePointerBitCastOrAddrSpaceCast(C, DestTy);
}
-
- //===--------------------------------------------------------------------===//
- // Compare Instructions
- //===--------------------------------------------------------------------===//
-
- Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const override {
- return ConstFolder.CreateFCmp(P, LHS, RHS);
- }
};
} // end namespace llvm
diff --git a/llvm/include/llvm/Analysis/TargetFolder.h b/llvm/include/llvm/Analysis/TargetFolder.h
index b4105ad76c02..f95d738a4065 100644
--- a/llvm/include/llvm/Analysis/TargetFolder.h
+++ b/llvm/include/llvm/Analysis/TargetFolder.h
@@ -99,7 +99,7 @@ public:
return FoldBinOp(Opc, LHS, RHS);
}
- Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ Value *FoldCmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC)
@@ -216,15 +216,6 @@ public:
return C; // avoid calling Fold
return Fold(ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy));
}
-
- //===--------------------------------------------------------------------===//
- // Compare Instructions
- //===--------------------------------------------------------------------===//
-
- Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const override {
- return Fold(ConstantExpr::getCompare(P, LHS, RHS));
- }
};
}
diff --git a/llvm/include/llvm/BinaryFormat/ELF.h b/llvm/include/llvm/BinaryFormat/ELF.h
index f296acc2ca4b..67cacaed2e12 100644
--- a/llvm/include/llvm/BinaryFormat/ELF.h
+++ b/llvm/include/llvm/BinaryFormat/ELF.h
@@ -796,11 +796,13 @@ enum : unsigned {
EF_AMDGPU_MACH_AMDGCN_GFX10_3_GENERIC = 0x053,
EF_AMDGPU_MACH_AMDGCN_GFX11_GENERIC = 0x054,
EF_AMDGPU_MACH_AMDGCN_RESERVED_0X55 = 0x055,
+ EF_AMDGPU_MACH_AMDGCN_RESERVED_0X56 = 0x056,
+ EF_AMDGPU_MACH_AMDGCN_RESERVED_0X57 = 0x057,
// clang-format on
// First/last AMDGCN-based processors.
EF_AMDGPU_MACH_AMDGCN_FIRST = EF_AMDGPU_MACH_AMDGCN_GFX600,
- EF_AMDGPU_MACH_AMDGCN_LAST = EF_AMDGPU_MACH_AMDGCN_GFX11_GENERIC,
+ EF_AMDGPU_MACH_AMDGCN_LAST = EF_AMDGPU_MACH_AMDGCN_RESERVED_0X57,
// Indicates if the "xnack" target feature is enabled for all code contained
// in the object.
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
index 371c5c5a0a1e..cc2dd2f4e489 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutor.h
@@ -212,6 +212,10 @@ enum {
/// - InsnID(ULEB128) - Instruction ID
GIM_CheckHasNoUse,
+ /// Check that the first result has exactly one (non-debug) use.
+ /// - InsnID(ULEB128) - Instruction ID
+ GIM_CheckHasOneUse,
+
/// Check the type for the specified operand
/// - InsnID(ULEB128) - Instruction ID
/// - OpIdx(ULEB128) - Operand index
diff --git a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
index 2ea9d11779f0..05f1a7e57e56 100644
--- a/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
+++ b/llvm/include/llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h
@@ -468,7 +468,24 @@ bool GIMatchTableExecutor::executeMatchTable(
if (handleReject() == RejectAndGiveUp)
return false;
}
+ break;
+ }
+ case GIM_CheckHasOneUse: {
+ uint64_t InsnID = readULEB();
+
+ DEBUG_WITH_TYPE(TgtExecutor::getName(),
+ dbgs() << CurrentIdx << ": GIM_CheckHasOneUse(MIs["
+ << InsnID << "]\n");
+
+ const MachineInstr *MI = State.MIs[InsnID];
+ assert(MI && "Used insn before defined");
+ assert(MI->getNumDefs() > 0 && "No defs");
+ const Register Res = MI->getOperand(0).getReg();
+ if (!MRI.hasOneNonDBGUse(Res)) {
+ if (handleReject() == RejectAndGiveUp)
+ return false;
+ }
break;
}
case GIM_CheckAtomicOrdering: {
diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
index ed6962685f7b..96a627069046 100644
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -991,6 +991,11 @@ public:
/// value assuming it was the smaller SrcTy value.
SDValue getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT);
+ /// Return the expression required to zero extend the Op
+ /// value assuming it was the smaller SrcTy value.
+ SDValue getVPZeroExtendInReg(SDValue Op, SDValue Mask, SDValue EVL,
+ const SDLoc &DL, EVT VT);
+
/// Convert Op, which must be of integer type, to the integer type VT, by
/// either truncating it or performing either zero or sign extension as
/// appropriate extension for the pointer's semantics.
diff --git a/llvm/include/llvm/IR/ConstantFolder.h b/llvm/include/llvm/IR/ConstantFolder.h
index 3e74a563a584..ce4b44ddc855 100644
--- a/llvm/include/llvm/IR/ConstantFolder.h
+++ b/llvm/include/llvm/IR/ConstantFolder.h
@@ -95,7 +95,7 @@ public:
return nullptr;
}
- Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ Value *FoldCmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC)
@@ -201,15 +201,6 @@ public:
Type *DestTy) const override {
return ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy);
}
-
- //===--------------------------------------------------------------------===//
- // Compare Instructions
- //===--------------------------------------------------------------------===//
-
- Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const override {
- return ConstantExpr::getCompare(P, LHS, RHS);
- }
};
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/IRBuilder.h b/llvm/include/llvm/IR/IRBuilder.h
index b6534a1962a2..0d8746344a44 100644
--- a/llvm/include/llvm/IR/IRBuilder.h
+++ b/llvm/include/llvm/IR/IRBuilder.h
@@ -2350,7 +2350,7 @@ public:
Value *CreateICmp(CmpInst::Predicate P, Value *LHS, Value *RHS,
const Twine &Name = "") {
- if (auto *V = Folder.FoldICmp(P, LHS, RHS))
+ if (auto *V = Folder.FoldCmp(P, LHS, RHS))
return V;
return Insert(new ICmpInst(P, LHS, RHS), Name);
}
diff --git a/llvm/include/llvm/IR/IRBuilderFolder.h b/llvm/include/llvm/IR/IRBuilderFolder.h
index 3020f2684ee4..f474c3a0206b 100644
--- a/llvm/include/llvm/IR/IRBuilderFolder.h
+++ b/llvm/include/llvm/IR/IRBuilderFolder.h
@@ -48,8 +48,8 @@ public:
virtual Value *FoldUnOpFMF(Instruction::UnaryOps Opc, Value *V,
FastMathFlags FMF) const = 0;
- virtual Value *FoldICmp(CmpInst::Predicate P, Value *LHS,
- Value *RHS) const = 0;
+ virtual Value *FoldCmp(CmpInst::Predicate P, Value *LHS,
+ Value *RHS) const = 0;
virtual Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
bool IsInBounds = false) const = 0;
@@ -84,13 +84,6 @@ public:
virtual Value *CreatePointerCast(Constant *C, Type *DestTy) const = 0;
virtual Value *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
Type *DestTy) const = 0;
-
- //===--------------------------------------------------------------------===//
- // Compare Instructions
- //===--------------------------------------------------------------------===//
-
- virtual Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
- Constant *RHS) const = 0;
};
} // end namespace llvm
diff --git a/llvm/include/llvm/IR/ModuleSummaryIndex.h b/llvm/include/llvm/IR/ModuleSummaryIndex.h
index 5d137d4b3553..a6bb261af752 100644
--- a/llvm/include/llvm/IR/ModuleSummaryIndex.h
+++ b/llvm/include/llvm/IR/ModuleSummaryIndex.h
@@ -587,6 +587,10 @@ public:
void setImportKind(ImportKind IK) { Flags.ImportType = IK; }
+ GlobalValueSummary::ImportKind importType() const {
+ return static_cast<ImportKind>(Flags.ImportType);
+ }
+
GlobalValue::VisibilityTypes getVisibility() const {
return (GlobalValue::VisibilityTypes)Flags.Visibility;
}
@@ -1272,6 +1276,9 @@ using ModulePathStringTableTy = StringMap<ModuleHash>;
/// a particular module, and provide efficient access to their summary.
using GVSummaryMapTy = DenseMap<GlobalValue::GUID, GlobalValueSummary *>;
+/// A set of global value summary pointers.
+using GVSummaryPtrSet = SmallPtrSet<GlobalValueSummary *, 4>;
+
/// Map of a type GUID to type id string and summary (multimap used
/// in case of GUID conflicts).
using TypeIdSummaryMapTy =
diff --git a/llvm/include/llvm/IR/NoFolder.h b/llvm/include/llvm/IR/NoFolder.h
index 7bb5d5e696e9..72ab22c0d294 100644
--- a/llvm/include/llvm/IR/NoFolder.h
+++ b/llvm/include/llvm/IR/NoFolder.h
@@ -70,7 +70,7 @@ public:
return nullptr;
}
- Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
+ Value *FoldCmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
return nullptr;
}
@@ -129,15 +129,6 @@ public:
Constant *C, Type *DestTy) const override {
return CastInst::CreatePointerBitCastOrAddrSpaceCast(C, DestTy);
}
-
- //===--------------------------------------------------------------------===//
- // Compare Instructions
- //===--------------------------------------------------------------------===//
-
- Instruction *CreateFCmp(CmpInst::Predicate P,
- Constant *LHS, Constant *RHS) const override {
- return new FCmpInst(P, LHS, RHS);
- }
};
} // end namespace llvm
diff --git a/llvm/include/llvm/MCA/IncrementalSourceMgr.h b/llvm/include/llvm/MCA/IncrementalSourceMgr.h
index d53f1138b940..81f9b51cf42f 100644
--- a/llvm/include/llvm/MCA/IncrementalSourceMgr.h
+++ b/llvm/include/llvm/MCA/IncrementalSourceMgr.h
@@ -41,7 +41,7 @@ class IncrementalSourceMgr : public SourceMgr {
bool EOS = false;
/// Called when an instruction is no longer needed.
- using InstFreedCallback = llvm::function_ref<void(Instruction *)>;
+ using InstFreedCallback = std::function<void(Instruction *)>;
InstFreedCallback InstFreedCB;
public:
diff --git a/llvm/include/llvm/MCA/InstrBuilder.h b/llvm/include/llvm/MCA/InstrBuilder.h
index c8619af04b33..359437248914 100644
--- a/llvm/include/llvm/MCA/InstrBuilder.h
+++ b/llvm/include/llvm/MCA/InstrBuilder.h
@@ -79,8 +79,7 @@ class InstrBuilder {
bool FirstCallInst;
bool FirstReturnInst;
- using InstRecycleCallback =
- llvm::function_ref<Instruction *(const InstrDesc &)>;
+ using InstRecycleCallback = std::function<Instruction *(const InstrDesc &)>;
InstRecycleCallback InstRecycleCB;
Expected<const InstrDesc &>
diff --git a/llvm/include/llvm/ProfileData/SampleProfReader.h b/llvm/include/llvm/ProfileData/SampleProfReader.h
index 9e8f543909cd..d7c70064ca42 100644
--- a/llvm/include/llvm/ProfileData/SampleProfReader.h
+++ b/llvm/include/llvm/ProfileData/SampleProfReader.h
@@ -274,8 +274,8 @@ public:
/// Create a remapper from the given remapping file. The remapper will
/// be used for profile read in by Reader.
static ErrorOr<std::unique_ptr<SampleProfileReaderItaniumRemapper>>
- create(const std::string Filename, vfs::FileSystem &FS,
- SampleProfileReader &Reader, LLVMContext &C);
+ create(StringRef Filename, vfs::FileSystem &FS, SampleProfileReader &Reader,
+ LLVMContext &C);
/// Create a remapper from the given Buffer. The remapper will
/// be used for profile read in by Reader.
@@ -436,9 +436,9 @@ public:
/// Create a remapper underlying if RemapFilename is not empty.
/// Parameter P specifies the FSDiscriminatorPass.
static ErrorOr<std::unique_ptr<SampleProfileReader>>
- create(const std::string Filename, LLVMContext &C, vfs::FileSystem &FS,
+ create(StringRef Filename, LLVMContext &C, vfs::FileSystem &FS,
FSDiscriminatorPass P = FSDiscriminatorPass::Base,
- const std::string RemapFilename = "");
+ StringRef RemapFilename = "");
/// Create a sample profile reader from the supplied memory buffer.
/// Create a remapper underlying if RemapFilename is not empty.
@@ -446,7 +446,7 @@ public:
static ErrorOr<std::unique_ptr<SampleProfileReader>>
create(std::unique_ptr<MemoryBuffer> &B, LLVMContext &C, vfs::FileSystem &FS,
FSDiscriminatorPass P = FSDiscriminatorPass::Base,
- const std::string RemapFilename = "");
+ StringRef RemapFilename = "");
/// Return the profile summary.
ProfileSummary &getSummary() const { return *(Summary.get()); }
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 1684b424e3b4..1c95a6090984 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -884,6 +884,9 @@ class PatFrags<dag ops, list<dag> frags, code pred = [{}],
// If set to true, a predicate is added that checks for the absence of use of
// the first result.
bit HasNoUse = ?;
+ // If set to true, a predicate is added that checks for the sole use of
+ // the first result.
+ bit HasOneUse = ?;
// Is the desired pre-packaged predicate for a load?
bit IsLoad = ?;
diff --git a/llvm/include/llvm/Transforms/IPO/FunctionImport.h b/llvm/include/llvm/Transforms/IPO/FunctionImport.h
index c4d19e8641ec..024bba8105b8 100644
--- a/llvm/include/llvm/Transforms/IPO/FunctionImport.h
+++ b/llvm/include/llvm/Transforms/IPO/FunctionImport.h
@@ -31,9 +31,9 @@ class Module;
/// based on the provided summary information.
class FunctionImporter {
public:
- /// Set of functions to import from a source module. Each entry is a set
- /// containing all the GUIDs of all functions to import for a source module.
- using FunctionsToImportTy = std::unordered_set<GlobalValue::GUID>;
+ /// The functions to import from a source module and their import type.
+ using FunctionsToImportTy =
+ DenseMap<GlobalValue::GUID, GlobalValueSummary::ImportKind>;
/// The different reasons selectCallee will choose not to import a
/// candidate.
@@ -99,8 +99,13 @@ public:
/// index's module path string table).
using ImportMapTy = DenseMap<StringRef, FunctionsToImportTy>;
- /// The set contains an entry for every global value the module exports.
- using ExportSetTy = DenseSet<ValueInfo>;
+ /// The map contains an entry for every global value the module exports.
+ /// The key is ValueInfo, and the value indicates whether the definition
+ /// or declaration is visible to another module. If a function's definition is
+ /// visible to other modules, the global values this function references are
+ /// visible and shouldn't be internalized.
+ /// TODO: Rename to `ExportMapTy`.
+ using ExportSetTy = DenseMap<ValueInfo, GlobalValueSummary::ImportKind>;
/// A function of this type is used to load modules referenced by the index.
using ModuleLoaderTy =
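A small sketch of how a consumer might read the new map-shaped ``FunctionsToImportTy`` (the helper name ``wantsDefinition`` is illustrative; the sketch assumes ``GlobalValueSummary::ImportKind`` exposes ``Definition`` and ``Declaration`` enumerators, as the surrounding changes suggest):

.. code-block:: c++

   #include "llvm/Transforms/IPO/FunctionImport.h"

   using namespace llvm;

   // Returns true if GUID is imported from SrcModule with its full body,
   // false if only a declaration (or nothing at all) is imported.
   static bool wantsDefinition(const FunctionImporter::ImportMapTy &ImportMap,
                               StringRef SrcModule, GlobalValue::GUID GUID) {
     auto ModIt = ImportMap.find(SrcModule);
     if (ModIt == ImportMap.end())
       return false;
     auto FnIt = ModIt->second.find(GUID);
     return FnIt != ModIt->second.end() &&
            FnIt->second == GlobalValueSummary::Definition;
   }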
diff --git a/llvm/lib/Analysis/CFG.cpp b/llvm/lib/Analysis/CFG.cpp
index 8528aa9f77e0..841b83505238 100644
--- a/llvm/lib/Analysis/CFG.cpp
+++ b/llvm/lib/Analysis/CFG.cpp
@@ -130,14 +130,21 @@ static const Loop *getOutermostLoop(const LoopInfo *LI, const BasicBlock *BB) {
return L ? L->getOutermostLoop() : nullptr;
}
-bool llvm::isPotentiallyReachableFromMany(
- SmallVectorImpl<BasicBlock *> &Worklist, const BasicBlock *StopBB,
- const SmallPtrSetImpl<BasicBlock *> *ExclusionSet, const DominatorTree *DT,
- const LoopInfo *LI) {
- // When the stop block is unreachable, it's dominated from everywhere,
+template <class StopSetT>
+static bool isReachableImpl(SmallVectorImpl<BasicBlock *> &Worklist,
+ const StopSetT &StopSet,
+ const SmallPtrSetImpl<BasicBlock *> *ExclusionSet,
+ const DominatorTree *DT, const LoopInfo *LI) {
+ // When a stop block is unreachable, it's dominated from everywhere,
// regardless of whether there's a path between the two blocks.
- if (DT && !DT->isReachableFromEntry(StopBB))
- DT = nullptr;
+ if (DT) {
+ for (auto *BB : StopSet) {
+ if (!DT->isReachableFromEntry(BB)) {
+ DT = nullptr;
+ break;
+ }
+ }
+ }
// We can't skip directly from a block that dominates the stop block if the
// exclusion block is potentially in between.
@@ -155,7 +162,13 @@ bool llvm::isPotentiallyReachableFromMany(
}
}
- const Loop *StopLoop = LI ? getOutermostLoop(LI, StopBB) : nullptr;
+ SmallPtrSet<const Loop *, 2> StopLoops;
+ if (LI) {
+ for (auto *StopSetBB : StopSet) {
+ if (const Loop *L = getOutermostLoop(LI, StopSetBB))
+ StopLoops.insert(L);
+ }
+ }
unsigned Limit = DefaultMaxBBsToExplore;
SmallPtrSet<const BasicBlock*, 32> Visited;
@@ -163,12 +176,16 @@ bool llvm::isPotentiallyReachableFromMany(
BasicBlock *BB = Worklist.pop_back_val();
if (!Visited.insert(BB).second)
continue;
- if (BB == StopBB)
+ if (StopSet.contains(BB))
return true;
if (ExclusionSet && ExclusionSet->count(BB))
continue;
- if (DT && DT->dominates(BB, StopBB))
- return true;
+ if (DT) {
+ if (llvm::any_of(StopSet, [&](const BasicBlock *StopBB) {
+ return DT->dominates(BB, StopBB);
+ }))
+ return true;
+ }
const Loop *Outer = nullptr;
if (LI) {
@@ -179,7 +196,7 @@ bool llvm::isPotentiallyReachableFromMany(
// excluded block. Clear Outer so we process BB's successors.
if (LoopsWithHoles.count(Outer))
Outer = nullptr;
- if (StopLoop && Outer == StopLoop)
+ if (StopLoops.contains(Outer))
return true;
}
@@ -204,6 +221,39 @@ bool llvm::isPotentiallyReachableFromMany(
return false;
}
+template <class T> class SingleEntrySet {
+public:
+ using const_iterator = const T *;
+
+ SingleEntrySet(T Elem) : Elem(Elem) {}
+
+ bool contains(T Other) const { return Elem == Other; }
+
+ const_iterator begin() const { return &Elem; }
+ const_iterator end() const { return &Elem + 1; }
+
+private:
+ T Elem;
+};
+
+bool llvm::isPotentiallyReachableFromMany(
+ SmallVectorImpl<BasicBlock *> &Worklist, const BasicBlock *StopBB,
+ const SmallPtrSetImpl<BasicBlock *> *ExclusionSet, const DominatorTree *DT,
+ const LoopInfo *LI) {
+ return isReachableImpl<SingleEntrySet<const BasicBlock *>>(
+ Worklist, SingleEntrySet<const BasicBlock *>(StopBB), ExclusionSet, DT,
+ LI);
+}
+
+bool llvm::isManyPotentiallyReachableFromMany(
+ SmallVectorImpl<BasicBlock *> &Worklist,
+ const SmallPtrSetImpl<const BasicBlock *> &StopSet,
+ const SmallPtrSetImpl<BasicBlock *> *ExclusionSet, const DominatorTree *DT,
+ const LoopInfo *LI) {
+ return isReachableImpl<SmallPtrSetImpl<const BasicBlock *>>(
+ Worklist, StopSet, ExclusionSet, DT, LI);
+}
+
bool llvm::isPotentiallyReachable(
const BasicBlock *A, const BasicBlock *B,
const SmallPtrSetImpl<BasicBlock *> *ExclusionSet, const DominatorTree *DT,
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
index 37a7259a5cd0..53a974c5294c 100644
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -3954,12 +3954,14 @@ static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
// LHS >s RHS.
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- return ConstantExpr::getICmp(ICmpInst::ICMP_SLT, C,
- Constant::getNullValue(C->getType()));
+ return ConstantFoldCompareInstOperands(
+ ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
+ Q.DL);
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- return ConstantExpr::getICmp(ICmpInst::ICMP_SGE, C,
- Constant::getNullValue(C->getType()));
+ return ConstantFoldCompareInstOperands(
+ ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
+ Q.DL);
// If LHS is non-negative then LHS <u RHS. If LHS is negative then
// LHS >u RHS.
@@ -5346,9 +5348,6 @@ static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
}
- // TODO: Look through bitcasts? What if the bitcast changes the vector element
- // size?
-
// The source operand is not a shuffle. Initialize the root vector value for
// this shuffle if that has not been done yet.
if (!RootVec)
diff --git a/llvm/lib/Analysis/LoopAccessAnalysis.cpp b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
index 13dec3b1e1b0..2a967f570c4a 100644
--- a/llvm/lib/Analysis/LoopAccessAnalysis.cpp
+++ b/llvm/lib/Analysis/LoopAccessAnalysis.cpp
@@ -203,11 +203,9 @@ RuntimeCheckingPtrGroup::RuntimeCheckingPtrGroup(
///
/// There is no conflict when the intervals are disjoint:
/// NoConflict = (P2.Start >= P1.End) || (P1.Start >= P2.End)
-void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
- Type *AccessTy, bool WritePtr,
- unsigned DepSetId, unsigned ASId,
- PredicatedScalarEvolution &PSE,
- bool NeedsFreeze) {
+static std::pair<const SCEV *, const SCEV *>
+getStartAndEndForAccess(const Loop *Lp, const SCEV *PtrExpr, Type *AccessTy,
+ PredicatedScalarEvolution &PSE) {
ScalarEvolution *SE = PSE.getSE();
const SCEV *ScStart;
@@ -215,9 +213,7 @@ void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
if (SE->isLoopInvariant(PtrExpr, Lp)) {
ScStart = ScEnd = PtrExpr;
- } else {
- const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr);
- assert(AR && "Invalid addrec expression");
+ } else if (auto *AR = dyn_cast<SCEVAddRecExpr>(PtrExpr)) {
const SCEV *Ex = PSE.getBackedgeTakenCount();
ScStart = AR->getStart();
@@ -236,16 +232,33 @@ void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
ScStart = SE->getUMinExpr(ScStart, ScEnd);
ScEnd = SE->getUMaxExpr(AR->getStart(), ScEnd);
}
- }
+ } else
+ return {SE->getCouldNotCompute(), SE->getCouldNotCompute()};
+
assert(SE->isLoopInvariant(ScStart, Lp) && "ScStart needs to be invariant");
assert(SE->isLoopInvariant(ScEnd, Lp)&& "ScEnd needs to be invariant");
// Add the size of the pointed element to ScEnd.
auto &DL = Lp->getHeader()->getModule()->getDataLayout();
- Type *IdxTy = DL.getIndexType(Ptr->getType());
+ Type *IdxTy = DL.getIndexType(PtrExpr->getType());
const SCEV *EltSizeSCEV = SE->getStoreSizeOfExpr(IdxTy, AccessTy);
ScEnd = SE->getAddExpr(ScEnd, EltSizeSCEV);
+ return {ScStart, ScEnd};
+}
+
+/// Calculate Start and End points of memory access using
+/// getStartAndEndForAccess.
+void RuntimePointerChecking::insert(Loop *Lp, Value *Ptr, const SCEV *PtrExpr,
+ Type *AccessTy, bool WritePtr,
+ unsigned DepSetId, unsigned ASId,
+ PredicatedScalarEvolution &PSE,
+ bool NeedsFreeze) {
+ const auto &[ScStart, ScEnd] =
+ getStartAndEndForAccess(Lp, PtrExpr, AccessTy, PSE);
+ assert(!isa<SCEVCouldNotCompute>(ScStart) &&
+ !isa<SCEVCouldNotCompute>(ScEnd) &&
+ "must be able to compute both start and end expressions");
Pointers.emplace_back(Ptr, ScStart, ScEnd, WritePtr, DepSetId, ASId, PtrExpr,
NeedsFreeze);
}
@@ -1977,6 +1990,23 @@ getDependenceDistanceStrideAndSize(
InnermostLoop))
return MemoryDepChecker::Dependence::IndirectUnsafe;
+ // Check if we can prove that Sink only accesses memory after Src's end or
+ // vice versa.
+ const auto &[SrcStart, SrcEnd] =
+ getStartAndEndForAccess(InnermostLoop, Src, ATy, PSE);
+ const auto &[SinkStart, SinkEnd] =
+ getStartAndEndForAccess(InnermostLoop, Sink, BTy, PSE);
+
+ if (!isa<SCEVCouldNotCompute>(SrcStart) &&
+ !isa<SCEVCouldNotCompute>(SrcEnd) &&
+ !isa<SCEVCouldNotCompute>(SinkStart) &&
+ !isa<SCEVCouldNotCompute>(SinkEnd)) {
+ if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SrcEnd, SinkStart))
+ return MemoryDepChecker::Dependence::NoDep;
+ if (SE.isKnownPredicate(CmpInst::ICMP_ULE, SinkEnd, SrcStart))
+ return MemoryDepChecker::Dependence::NoDep;
+ }
+
// Need accesses with constant strides and the same direction. We don't want
// to vectorize "A[B[i]] += ..." and similar code or pointer arithmetic that
// could wrap in the address space.
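The new early exit reasons about whole-loop address ranges: if the byte range Src can touch ends at or before the range Sink can touch begins (or vice versa), the two accesses cannot overlap, so there is no dependence. A standalone sketch of that interval test, with plain integers standing in for the SCEV start/end expressions:

.. code-block:: c++

   #include <cstdint>

   // Half-open byte ranges [Start, End) covering everything an access may
   // touch across the loop (End already includes the element size, as in
   // getStartAndEndForAccess).
   struct AccessRange {
     uint64_t Start, End;
   };

   // Mirrors the two ICMP_ULE queries above: disjoint ranges => no dependence.
   static bool provablyNoOverlap(AccessRange Src, AccessRange Sink) {
     return Src.End <= Sink.Start || Sink.End <= Src.Start;
   }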
diff --git a/llvm/lib/Analysis/TargetLibraryInfo.cpp b/llvm/lib/Analysis/TargetLibraryInfo.cpp
index 7ce42447b630..592caf2d0e23 100644
--- a/llvm/lib/Analysis/TargetLibraryInfo.cpp
+++ b/llvm/lib/Analysis/TargetLibraryInfo.cpp
@@ -882,6 +882,9 @@ static void initializeLibCalls(TargetLibraryInfoImpl &TLI, const Triple &T,
TLI.setUnavailable(LibFunc_vec_free);
}
+ if (T.isOSAIX())
+ TLI.setUnavailable(LibFunc_memrchr);
+
TLI.addVectorizableFunctionsFromVecLib(ClVectorLibrary, T);
}
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 2d1486d252c3..3baa8ede28ff 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -2493,9 +2493,20 @@ static bool isNonZeroRecurrence(const PHINode *PN) {
}
}
+static bool matchOpWithOpEqZero(Value *Op0, Value *Op1) {
+ ICmpInst::Predicate Pred;
+ return (match(Op0, m_ZExtOrSExt(m_ICmp(Pred, m_Specific(Op1), m_Zero()))) ||
+ match(Op1, m_ZExtOrSExt(m_ICmp(Pred, m_Specific(Op0), m_Zero())))) &&
+ Pred == ICmpInst::ICMP_EQ;
+}
+
static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
const SimplifyQuery &Q, unsigned BitWidth, Value *X,
Value *Y, bool NSW, bool NUW) {
+ // (X + (X == 0)) is non zero
+ if (matchOpWithOpEqZero(X, Y))
+ return true;
+
if (NUW)
return isKnownNonZero(Y, DemandedElts, Q, Depth) ||
isKnownNonZero(X, DemandedElts, Q, Depth);
@@ -2539,6 +2550,11 @@ static bool isNonZeroAdd(const APInt &DemandedElts, unsigned Depth,
static bool isNonZeroSub(const APInt &DemandedElts, unsigned Depth,
const SimplifyQuery &Q, unsigned BitWidth, Value *X,
Value *Y) {
+ // (X - (X == 0)) is non zero
+ // ((X == 0) - X) is non zero
+ if (matchOpWithOpEqZero(X, Y))
+ return true;
+
// TODO: Move this case into isKnownNonEqual().
if (auto *C = dyn_cast<Constant>(X))
if (C->isNullValue() && isKnownNonZero(Y, DemandedElts, Q, Depth))
@@ -2698,7 +2714,15 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
case Instruction::Sub:
return isNonZeroSub(DemandedElts, Depth, Q, BitWidth, I->getOperand(0),
I->getOperand(1));
+ case Instruction::Xor:
+ // (X ^ (X == 0)) is non zero
+ if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1)))
+ return true;
+ break;
case Instruction::Or:
+ // (X | (X == 0)) is non zero
+ if (matchOpWithOpEqZero(I->getOperand(0), I->getOperand(1)))
+ return true;
// X | Y != 0 if X != 0 or Y != 0.
return isKnownNonZero(I->getOperand(1), DemandedElts, Q, Depth) ||
isKnownNonZero(I->getOperand(0), DemandedElts, Q, Depth);
@@ -2989,6 +3013,11 @@ static bool isKnownNonZeroFromOperator(const Operator *I,
return isKnownNonZero(II->getArgOperand(0), Q, Depth);
case Intrinsic::umax:
case Intrinsic::uadd_sat:
+ // umax(X, (X == 0)) is non zero
+ // X +usat (X == 0) is non zero
+ if (matchOpWithOpEqZero(II->getArgOperand(0), II->getArgOperand(1)))
+ return true;
+
return isKnownNonZero(II->getArgOperand(1), DemandedElts, Q, Depth) ||
isKnownNonZero(II->getArgOperand(0), DemandedElts, Q, Depth);
case Intrinsic::smax: {
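A standalone case analysis of the ``X <op> (X == 0)`` patterns handled above (the compare is the equality form, as the ``ICMP_EQ`` matcher in ``matchOpWithOpEqZero`` requires; ``eqz`` models ``zext(icmp eq X, 0)`` as 0 or 1): each pattern is non-zero whether or not ``X`` is zero.

.. code-block:: c++

   #include <algorithm>
   #include <cassert>
   #include <cstdint>

   // zext(icmp eq X, 0) modelled on 64-bit lanes.
   static uint64_t eqz(uint64_t X) { return X == 0 ? 1 : 0; }

   int main() {
     for (uint64_t X : {uint64_t(0), uint64_t(1), uint64_t(7), ~uint64_t(0)}) {
       assert(X + eqz(X) != 0);          // add
       assert(X - eqz(X) != 0);          // sub (wraps to -1 when X == 0)
       assert(eqz(X) - X != 0);          // sub, other operand order
       assert((X ^ eqz(X)) != 0);        // xor
       assert((X | eqz(X)) != 0);        // or
       assert(std::max(X, eqz(X)) != 0); // umax
     }
     return 0;
   }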
@@ -4940,11 +4969,8 @@ void computeKnownFPClass(const Value *V, const APInt &DemandedElts,
// subnormal input could produce a negative zero output.
const Function *F = II->getFunction();
if (Q.IIQ.hasNoSignedZeros(II) ||
- (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType()))) {
+ (F && KnownSrc.isKnownNeverLogicalNegZero(*F, II->getType())))
Known.knownNot(fcNegZero);
- if (KnownSrc.isKnownNeverNaN())
- Known.signBitMustBeZero();
- }
break;
}
diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
index d50cdc4323ec..c5755b9bdc8d 100644
--- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
+++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinter.cpp
@@ -2463,11 +2463,15 @@ bool AsmPrinter::doFinalization(Module &M) {
emitGlobalIFunc(M, IFunc);
// Finalize debug and EH information.
+ // Defer MCAssembler based constant folding due to a performance issue. The
+ // label differences will be evaluated at write time.
+ OutStreamer->setUseAssemblerInfoForParsing(false);
for (const HandlerInfo &HI : Handlers) {
NamedRegionTimer T(HI.TimerName, HI.TimerDescription, HI.TimerGroupName,
HI.TimerGroupDescription, TimePassesIsEnabled);
HI.Handler->endModule();
}
+ OutStreamer->setUseAssemblerInfoForParsing(true);
// This deletes all the ephemeral handlers that AsmPrinter added, while
// keeping all the user-added handlers alive until the AsmPrinter is
diff --git a/llvm/lib/CodeGen/MachineScheduler.cpp b/llvm/lib/CodeGen/MachineScheduler.cpp
index 78d581c8cead..0858be64de40 100644
--- a/llvm/lib/CodeGen/MachineScheduler.cpp
+++ b/llvm/lib/CodeGen/MachineScheduler.cpp
@@ -1664,7 +1664,8 @@ void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
if (ShouldTrackPressure) {
// Update top scheduled pressure.
RegisterOperands RegOpers;
- RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
+ RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks,
+ /*IgnoreDead=*/false);
if (ShouldTrackLaneMasks) {
// Adjust liveness and add missing dead+read-undef flags.
SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
@@ -1698,7 +1699,8 @@ void ScheduleDAGMILive::scheduleMI(SUnit *SU, bool IsTopNode) {
}
if (ShouldTrackPressure) {
RegisterOperands RegOpers;
- RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks, false);
+ RegOpers.collect(*MI, *TRI, MRI, ShouldTrackLaneMasks,
+ /*IgnoreDead=*/false);
if (ShouldTrackLaneMasks) {
// Adjust liveness and add missing dead+read-undef flags.
SlotIndex SlotIdx = LIS->getInstructionIndex(*MI).getRegSlot();
diff --git a/llvm/lib/CodeGen/RegisterPressure.cpp b/llvm/lib/CodeGen/RegisterPressure.cpp
index 3fa22447f416..9a7eb49666b2 100644
--- a/llvm/lib/CodeGen/RegisterPressure.cpp
+++ b/llvm/lib/CodeGen/RegisterPressure.cpp
@@ -873,7 +873,7 @@ void RegPressureTracker::recede(SmallVectorImpl<RegisterMaskPair> *LiveUses) {
const MachineInstr &MI = *CurrPos;
RegisterOperands RegOpers;
- RegOpers.collect(MI, *TRI, *MRI, TrackLaneMasks, false);
+ RegOpers.collect(MI, *TRI, *MRI, TrackLaneMasks, /*IgnoreDead=*/false);
if (TrackLaneMasks) {
SlotIndex SlotIdx = LIS->getInstructionIndex(*CurrPos).getRegSlot();
RegOpers.adjustLaneLiveness(*LIS, *MRI, SlotIdx);
@@ -1041,7 +1041,7 @@ void RegPressureTracker::bumpUpwardPressure(const MachineInstr *MI) {
// Account for register pressure similar to RegPressureTracker::recede().
RegisterOperands RegOpers;
RegOpers.collect(*MI, *TRI, *MRI, TrackLaneMasks, /*IgnoreDead=*/true);
- assert(RegOpers.DeadDefs.size() == 0);
+ assert(RegOpers.DeadDefs.empty());
if (TrackLaneMasks)
RegOpers.adjustLaneLiveness(*LIS, *MRI, SlotIdx);
else if (RequireIntervals)
@@ -1290,7 +1290,7 @@ void RegPressureTracker::bumpDownwardPressure(const MachineInstr *MI) {
// Account for register pressure similar to RegPressureTracker::recede().
RegisterOperands RegOpers;
- RegOpers.collect(*MI, *TRI, *MRI, TrackLaneMasks, false);
+ RegOpers.collect(*MI, *TRI, *MRI, TrackLaneMasks, /*IgnoreDead=*/false);
if (TrackLaneMasks)
RegOpers.adjustLaneLiveness(*LIS, *MRI, SlotIdx);
diff --git a/llvm/lib/CodeGen/ScheduleDAG.cpp b/llvm/lib/CodeGen/ScheduleDAG.cpp
index de8e6f63794d..8d9a5041fc2f 100644
--- a/llvm/lib/CodeGen/ScheduleDAG.cpp
+++ b/llvm/lib/CodeGen/ScheduleDAG.cpp
@@ -331,8 +331,10 @@ void SUnit::biasCriticalPath() {
unsigned MaxDepth = BestI->getSUnit()->getDepth();
for (SUnit::pred_iterator I = std::next(BestI), E = Preds.end(); I != E;
++I) {
- if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth)
+ if (I->getKind() == SDep::Data && I->getSUnit()->getDepth() > MaxDepth) {
+ MaxDepth = I->getSUnit()->getDepth();
BestI = I;
+ }
}
if (BestI != Preds.begin())
std::swap(*Preds.begin(), *BestI);
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
index 0543c211c497..bfc2273c9425 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -2050,8 +2050,15 @@ SDValue SelectionDAGLegalize::ExpandSPLAT_VECTOR(SDNode *Node) {
std::pair<SDValue, SDValue> SelectionDAGLegalize::ExpandLibCall(RTLIB::Libcall LC, SDNode *Node,
TargetLowering::ArgListTy &&Args,
bool isSigned) {
- SDValue Callee = DAG.getExternalSymbol(TLI.getLibcallName(LC),
- TLI.getPointerTy(DAG.getDataLayout()));
+ EVT CodePtrTy = TLI.getPointerTy(DAG.getDataLayout());
+ SDValue Callee;
+ if (const char *LibcallName = TLI.getLibcallName(LC))
+ Callee = DAG.getExternalSymbol(LibcallName, CodePtrTy);
+ else {
+ Callee = DAG.getUNDEF(CodePtrTy);
+ DAG.getContext()->emitError(Twine("no libcall available for ") +
+ Node->getOperationName(&DAG));
+ }
EVT RetVT = Node->getValueType(0);
Type *RetTy = RetVT.getTypeForEVT(*DAG.getContext());
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
index 98f64947bcab..c64e27fe4563 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -1511,8 +1511,7 @@ SDValue DAGTypeLegalizer::PromoteIntRes_VPFunnelShift(SDNode *N) {
!TLI.isOperationLegalOrCustom(Opcode, VT)) {
SDValue HiShift = DAG.getConstant(OldBits, DL, VT);
Hi = DAG.getNode(ISD::VP_SHL, DL, VT, Hi, HiShift, Mask, EVL);
- // FIXME: Replace it by vp operations.
- Lo = DAG.getZeroExtendInReg(Lo, DL, OldVT);
+ Lo = DAG.getVPZeroExtendInReg(Lo, Mask, EVL, DL, OldVT);
SDValue Res = DAG.getNode(ISD::VP_OR, DL, VT, Hi, Lo, Mask, EVL);
Res = DAG.getNode(IsFSHR ? ISD::VP_LSHR : ISD::VP_SHL, DL, VT, Res, Amt,
Mask, EVL);
@@ -2375,10 +2374,8 @@ SDValue DAGTypeLegalizer::PromoteIntOp_VP_ZERO_EXTEND(SDNode *N) {
// FIXME: There is no VP_ANY_EXTEND yet.
Op = DAG.getNode(ISD::VP_ZERO_EXTEND, dl, VT, Op, N->getOperand(1),
N->getOperand(2));
- APInt Imm = APInt::getLowBitsSet(VT.getScalarSizeInBits(),
- N->getOperand(0).getScalarValueSizeInBits());
- return DAG.getNode(ISD::VP_AND, dl, VT, Op, DAG.getConstant(Imm, dl, VT),
- N->getOperand(1), N->getOperand(2));
+ return DAG.getVPZeroExtendInReg(Op, N->getOperand(1), N->getOperand(2), dl,
+ N->getOperand(0).getValueType());
}
SDValue DAGTypeLegalizer::PromoteIntOp_FIX(SDNode *N) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index dca5a481fbd0..ec0513591566 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -2911,18 +2911,10 @@ void DAGTypeLegalizer::SplitVecRes_VECTOR_REVERSE(SDNode *N, SDValue &Lo,
void DAGTypeLegalizer::SplitVecRes_VECTOR_SPLICE(SDNode *N, SDValue &Lo,
SDValue &Hi) {
- EVT VT = N->getValueType(0);
SDLoc DL(N);
- EVT LoVT, HiVT;
- std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VT);
-
SDValue Expanded = TLI.expandVectorSplice(N, DAG);
- Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, Expanded,
- DAG.getVectorIdxConstant(0, DL));
- Hi =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, Expanded,
- DAG.getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
+ std::tie(Lo, Hi) = DAG.SplitVector(Expanded, DL);
}
void DAGTypeLegalizer::SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo,
@@ -2967,12 +2959,7 @@ void DAGTypeLegalizer::SplitVecRes_VP_REVERSE(SDNode *N, SDValue &Lo,
SDValue Load = DAG.getLoadVP(VT, DL, Store, StackPtr, Mask, EVL, LoadMMO);
- auto [LoVT, HiVT] = DAG.GetSplitDestVTs(VT);
- Lo = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, LoVT, Load,
- DAG.getVectorIdxConstant(0, DL));
- Hi =
- DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, HiVT, Load,
- DAG.getVectorIdxConstant(LoVT.getVectorMinNumElements(), DL));
+ std::tie(Lo, Hi) = DAG.SplitVector(Load, DL);
}
void DAGTypeLegalizer::SplitVecRes_VECTOR_DEINTERLEAVE(SDNode *N) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
index 72685a2d7721..777bbf071732 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -1540,6 +1540,25 @@ SDValue SelectionDAG::getZeroExtendInReg(SDValue Op, const SDLoc &DL, EVT VT) {
return getNode(ISD::AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT));
}
+SDValue SelectionDAG::getVPZeroExtendInReg(SDValue Op, SDValue Mask,
+ SDValue EVL, const SDLoc &DL,
+ EVT VT) {
+ EVT OpVT = Op.getValueType();
+ assert(VT.isInteger() && OpVT.isInteger() &&
+ "Cannot getVPZeroExtendInReg FP types");
+ assert(VT.isVector() && OpVT.isVector() &&
+ "getVPZeroExtendInReg type and operand type should be vector!");
+ assert(VT.getVectorElementCount() == OpVT.getVectorElementCount() &&
+ "Vector element counts must match in getZeroExtendInReg");
+ assert(VT.bitsLE(OpVT) && "Not extending!");
+ if (OpVT == VT)
+ return Op;
+ APInt Imm = APInt::getLowBitsSet(OpVT.getScalarSizeInBits(),
+ VT.getScalarSizeInBits());
+ return getNode(ISD::VP_AND, DL, OpVT, Op, getConstant(Imm, DL, OpVT), Mask,
+ EVL);
+}
+
SDValue SelectionDAG::getPtrExtOrTrunc(SDValue Op, const SDLoc &DL, EVT VT) {
// Only unsigned pointer semantics are supported right now. In the future this
// might delegate to TLI to check pointer signedness.
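A scalar sketch of the masking arithmetic the new helper performs per active lane (plain ``uint64_t`` lanes stand in for ``SDValue`` vector elements; lanes disabled by ``Mask``/``EVL`` follow the ``VP_AND`` semantics and are not modelled here):

.. code-block:: c++

   #include <cstdint>

   // Zero-extend "in register": keep the low FromBits bits of a lane and
   // clear the rest, i.e. the same mask as APInt::getLowBitsSet.
   static uint64_t zeroExtendInReg(uint64_t Lane, unsigned FromBits) {
     uint64_t Mask =
         FromBits >= 64 ? ~uint64_t(0) : ((uint64_t(1) << FromBits) - 1);
     return Lane & Mask;
   }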
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index 09b70cfb7227..82a59918b085 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -227,6 +227,34 @@ void TargetLoweringBase::InitLibcalls(const Triple &TT) {
CallingConv::ARM_AAPCS_VFP);
}
}
+
+ switch (TT.getOS()) {
+ case Triple::MacOSX:
+ if (TT.isMacOSXVersionLT(10, 9)) {
+ setLibcallName(RTLIB::EXP10_F32, nullptr);
+ setLibcallName(RTLIB::EXP10_F64, nullptr);
+ } else {
+ setLibcallName(RTLIB::EXP10_F32, "__exp10f");
+ setLibcallName(RTLIB::EXP10_F64, "__exp10");
+ }
+ break;
+ case Triple::IOS:
+ case Triple::TvOS:
+ case Triple::WatchOS:
+ case Triple::XROS:
+ if (!TT.isWatchOS() &&
+ (TT.isOSVersionLT(7, 0) || (TT.isOSVersionLT(9, 0) && TT.isX86()))) {
+ setLibcallName(RTLIB::EXP10_F32, nullptr);
+ setLibcallName(RTLIB::EXP10_F64, nullptr);
+ } else {
+ setLibcallName(RTLIB::EXP10_F32, "__exp10f");
+ setLibcallName(RTLIB::EXP10_F64, "__exp10");
+ }
+
+ break;
+ default:
+ break;
+ }
} else {
setLibcallName(RTLIB::FPEXT_F16_F32, "__gnu_h2f_ieee");
setLibcallName(RTLIB::FPROUND_F32_F16, "__gnu_f2h_ieee");
diff --git a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
index 3e1897ce670a..0fc915d89f6c 100644
--- a/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringObjectFileImpl.cpp
@@ -523,6 +523,8 @@ static unsigned getELFSectionType(StringRef Name, SectionKind K) {
if (hasPrefix(Name, ".llvm.offloading"))
return ELF::SHT_LLVM_OFFLOADING;
+ if (Name == ".llvm.lto")
+ return ELF::SHT_LLVM_LTO;
if (K.isBSS() || K.isThreadBSS())
return ELF::SHT_NOBITS;
diff --git a/llvm/lib/IR/IRBuilder.cpp b/llvm/lib/IR/IRBuilder.cpp
index c6f20af0f1df..0d6760ed0841 100644
--- a/llvm/lib/IR/IRBuilder.cpp
+++ b/llvm/lib/IR/IRBuilder.cpp
@@ -1053,9 +1053,8 @@ Value *IRBuilderBase::CreateFCmpHelper(
return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
}
- if (auto *LC = dyn_cast<Constant>(LHS))
- if (auto *RC = dyn_cast<Constant>(RHS))
- return Insert(Folder.CreateFCmp(P, LC, RC), Name);
+ if (auto *V = Folder.FoldCmp(P, LHS, RHS))
+ return V;
return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}
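With the helper now routed through ``FoldCmp``, floating-point compares with constant operands fold the same way integer ones do. A minimal sketch (standalone; no insertion point is set because both compares are expected to fold to constants rather than be inserted):

.. code-block:: c++

   #include "llvm/IR/Constants.h"
   #include "llvm/IR/IRBuilder.h"
   #include "llvm/IR/LLVMContext.h"
   #include <cassert>

   using namespace llvm;

   void foldConstantCompares(LLVMContext &Ctx) {
     IRBuilder<> B(Ctx);
     // Both operands are constants, so the builder's folder hands back an
     // i1 constant instead of creating an instruction.
     Value *I = B.CreateICmpULT(B.getInt32(1), B.getInt32(2));
     Value *F = B.CreateFCmpOLT(ConstantFP::get(B.getFloatTy(), 1.0),
                                ConstantFP::get(B.getFloatTy(), 2.0));
     assert(isa<Constant>(I) && isa<Constant>(F));
     (void)I;
     (void)F;
   }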
diff --git a/llvm/lib/IR/MDBuilder.cpp b/llvm/lib/IR/MDBuilder.cpp
index 0bf41d7cc7c2..bd68db3a6f96 100644
--- a/llvm/lib/IR/MDBuilder.cpp
+++ b/llvm/lib/IR/MDBuilder.cpp
@@ -86,9 +86,8 @@ MDNode *MDBuilder::createFunctionEntryCount(
}
MDNode *MDBuilder::createFunctionSectionPrefix(StringRef Prefix) {
- return MDNode::get(Context,
- {createString("function_section_prefix"),
- createString(Prefix)});
+ return MDNode::get(
+ Context, {createString("function_section_prefix"), createString(Prefix)});
}
MDNode *MDBuilder::createRange(const APInt &Lo, const APInt &Hi) {
@@ -148,9 +147,10 @@ MDNode *MDBuilder::mergeCallbackEncodings(MDNode *ExistingCallbacks,
for (unsigned u = 0; u < NumExistingOps; u++) {
Ops[u] = ExistingCallbacks->getOperand(u);
- auto *OldCBCalleeIdxAsCM = cast<ConstantAsMetadata>(Ops[u]);
+ auto *OldCBCalleeIdxAsCM =
+ cast<ConstantAsMetadata>(cast<MDNode>(Ops[u])->getOperand(0));
uint64_t OldCBCalleeIdx =
- cast<ConstantInt>(OldCBCalleeIdxAsCM->getValue())->getZExtValue();
+ cast<ConstantInt>(OldCBCalleeIdxAsCM->getValue())->getZExtValue();
(void)OldCBCalleeIdx;
assert(NewCBCalleeIdx != OldCBCalleeIdx &&
"Cannot map a callback callee index twice!");
@@ -339,8 +339,8 @@ MDNode *MDBuilder::createMutableTBAAAccessTag(MDNode *Tag) {
MDNode *MDBuilder::createIrrLoopHeaderWeight(uint64_t Weight) {
Metadata *Vals[] = {
- createString("loop_header_weight"),
- createConstant(ConstantInt::get(Type::getInt64Ty(Context), Weight)),
+ createString("loop_header_weight"),
+ createConstant(ConstantInt::get(Type::getInt64Ty(Context), Weight)),
};
return MDNode::get(Context, Vals);
}
diff --git a/llvm/lib/IR/Module.cpp b/llvm/lib/IR/Module.cpp
index a8696ed9e3ce..f97dd18c736c 100644
--- a/llvm/lib/IR/Module.cpp
+++ b/llvm/lib/IR/Module.cpp
@@ -882,7 +882,7 @@ StringRef Module::getDarwinTargetVariantTriple() const {
}
void Module::setDarwinTargetVariantTriple(StringRef T) {
- addModuleFlag(ModFlagBehavior::Override, "darwin.target_variant.triple",
+ addModuleFlag(ModFlagBehavior::Warning, "darwin.target_variant.triple",
MDString::get(getContext(), T));
}
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index 5c603ac6ab47..e2754d74979e 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -121,6 +121,9 @@ void llvm::computeLTOCacheKey(
support::endian::write64le(Data, I);
Hasher.update(Data);
};
+ auto AddUint8 = [&](const uint8_t I) {
+ Hasher.update(ArrayRef<uint8_t>((const uint8_t *)&I, 1));
+ };
AddString(Conf.CPU);
// FIXME: Hash more of Options. For now all clients initialize Options from
// command-line flags (which is unsupported in production), but may set
@@ -156,18 +159,18 @@ void llvm::computeLTOCacheKey(
auto ModHash = Index.getModuleHash(ModuleID);
Hasher.update(ArrayRef<uint8_t>((uint8_t *)&ModHash[0], sizeof(ModHash)));
- std::vector<uint64_t> ExportsGUID;
+ std::vector<std::pair<uint64_t, uint8_t>> ExportsGUID;
ExportsGUID.reserve(ExportList.size());
- for (const auto &VI : ExportList) {
- auto GUID = VI.getGUID();
- ExportsGUID.push_back(GUID);
- }
+ for (const auto &[VI, ExportType] : ExportList)
+ ExportsGUID.push_back(
+ std::make_pair(VI.getGUID(), static_cast<uint8_t>(ExportType)));
// Sort the export list elements GUIDs.
llvm::sort(ExportsGUID);
- for (uint64_t GUID : ExportsGUID) {
+ for (auto [GUID, ExportType] : ExportsGUID) {
// The export list can impact the internalization, be conservative here
Hasher.update(ArrayRef<uint8_t>((uint8_t *)&GUID, sizeof(GUID)));
+ AddUint8(ExportType);
}
// Include the hash for every module we import functions from. The set of
@@ -199,7 +202,7 @@ void llvm::computeLTOCacheKey(
[](const ImportModule &Lhs, const ImportModule &Rhs) -> bool {
return Lhs.getHash() < Rhs.getHash();
});
- std::vector<uint64_t> ImportedGUIDs;
+ std::vector<std::pair<uint64_t, uint8_t>> ImportedGUIDs;
for (const ImportModule &Entry : ImportModulesVector) {
auto ModHash = Entry.getHash();
Hasher.update(ArrayRef<uint8_t>((uint8_t *)&ModHash[0], sizeof(ModHash)));
@@ -207,11 +210,13 @@ void llvm::computeLTOCacheKey(
AddUint64(Entry.getFunctions().size());
ImportedGUIDs.clear();
- for (auto &Fn : Entry.getFunctions())
- ImportedGUIDs.push_back(Fn);
+ for (auto &[Fn, ImportType] : Entry.getFunctions())
+ ImportedGUIDs.push_back(std::make_pair(Fn, ImportType));
llvm::sort(ImportedGUIDs);
- for (auto &GUID : ImportedGUIDs)
+ for (auto &[GUID, Type] : ImportedGUIDs) {
AddUint64(GUID);
+ AddUint8(Type);
+ }
}
// Include the hash for the resolved ODR.
@@ -281,9 +286,9 @@ void llvm::computeLTOCacheKey(
// Imported functions may introduce new uses of type identifier resolutions,
// so we need to collect their used resolutions as well.
for (const ImportModule &ImpM : ImportModulesVector)
- for (auto &ImpF : ImpM.getFunctions()) {
+ for (auto &[GUID, UnusedImportType] : ImpM.getFunctions()) {
GlobalValueSummary *S =
- Index.findSummaryInModule(ImpF, ImpM.getIdentifier());
+ Index.findSummaryInModule(GUID, ImpM.getIdentifier());
AddUsedThings(S);
// If this is an alias, we also care about any types/etc. that the aliasee
// may reference.
@@ -1395,6 +1400,7 @@ public:
llvm::StringRef ModulePath,
const std::string &NewModulePath) {
std::map<std::string, GVSummaryMapTy> ModuleToSummariesForIndex;
+
std::error_code EC;
gatherImportedSummariesForModule(ModulePath, ModuleToDefinedGVSummaries,
ImportList, ModuleToSummariesForIndex);
@@ -1403,6 +1409,8 @@ public:
sys::fs::OpenFlags::OF_None);
if (EC)
return errorCodeToError(EC);
+
+ // TODO: Serialize declaration bits to bitcode.
writeIndexToFile(CombinedIndex, OS, &ModuleToSummariesForIndex);
if (ShouldEmitImportsFiles) {
diff --git a/llvm/lib/LTO/LTOBackend.cpp b/llvm/lib/LTO/LTOBackend.cpp
index 21aed799d6fa..76223e88ca1a 100644
--- a/llvm/lib/LTO/LTOBackend.cpp
+++ b/llvm/lib/LTO/LTOBackend.cpp
@@ -720,7 +720,14 @@ bool lto::initImportList(const Module &M,
if (Summary->modulePath() == M.getModuleIdentifier())
continue;
// Add an entry to provoke importing by thinBackend.
- ImportList[Summary->modulePath()].insert(GUID);
+ // Try to emplace the entry first. If an entry with the same key already
+ // exists, set the value to 'std::min(existing-value, new-value)' to make
+ // sure a definition takes precedence over a declaration.
+ auto [Iter, Inserted] = ImportList[Summary->modulePath()].try_emplace(
+ GUID, Summary->importType());
+
+ if (!Inserted)
+ Iter->second = std::min(Iter->second, Summary->importType());
}
}
return true;
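A standalone sketch of the try_emplace-then-min idiom used above (a plain ``std::map`` and a local enum stand in for the LLVM types; the sketch assumes, as the ``std::min`` above implies, that the definition kind orders before the declaration kind):

.. code-block:: c++

   #include <algorithm>
   #include <cassert>
   #include <map>

   // Stand-in for GlobalValueSummary::ImportKind; Definition must compare
   // less than Declaration for std::min to prefer definitions.
   enum ImportKind { Definition = 0, Declaration = 1 };

   using FunctionsToImport = std::map<unsigned, ImportKind>; // GUID -> kind

   // Record that GUID should be imported with kind K; a definition request
   // upgrades an earlier declaration-only request, never the other way round.
   static void addImport(FunctionsToImport &List, unsigned GUID, ImportKind K) {
     auto [It, Inserted] = List.try_emplace(GUID, K);
     if (!Inserted)
       It->second = std::min(It->second, K);
   }

   int main() {
     FunctionsToImport List;
     addImport(List, 42, Declaration);
     addImport(List, 42, Definition); // upgrades the earlier entry
     assert(List.at(42) == Definition);
     return 0;
   }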
diff --git a/llvm/lib/ProfileData/SampleProfReader.cpp b/llvm/lib/ProfileData/SampleProfReader.cpp
index f91a0e6177ea..a4b2d0668a5a 100644
--- a/llvm/lib/ProfileData/SampleProfReader.cpp
+++ b/llvm/lib/ProfileData/SampleProfReader.cpp
@@ -1822,9 +1822,9 @@ setupMemoryBuffer(const Twine &Filename, vfs::FileSystem &FS) {
///
/// \returns an error code indicating the status of the created reader.
ErrorOr<std::unique_ptr<SampleProfileReader>>
-SampleProfileReader::create(const std::string Filename, LLVMContext &C,
+SampleProfileReader::create(StringRef Filename, LLVMContext &C,
vfs::FileSystem &FS, FSDiscriminatorPass P,
- const std::string RemapFilename) {
+ StringRef RemapFilename) {
auto BufferOrError = setupMemoryBuffer(Filename, FS);
if (std::error_code EC = BufferOrError.getError())
return EC;
@@ -1842,7 +1842,7 @@ SampleProfileReader::create(const std::string Filename, LLVMContext &C,
///
/// \returns an error code indicating the status of the created reader.
ErrorOr<std::unique_ptr<SampleProfileReaderItaniumRemapper>>
-SampleProfileReaderItaniumRemapper::create(const std::string Filename,
+SampleProfileReaderItaniumRemapper::create(StringRef Filename,
vfs::FileSystem &FS,
SampleProfileReader &Reader,
LLVMContext &C) {
@@ -1895,7 +1895,7 @@ SampleProfileReaderItaniumRemapper::create(std::unique_ptr<MemoryBuffer> &B,
ErrorOr<std::unique_ptr<SampleProfileReader>>
SampleProfileReader::create(std::unique_ptr<MemoryBuffer> &B, LLVMContext &C,
vfs::FileSystem &FS, FSDiscriminatorPass P,
- const std::string RemapFilename) {
+ StringRef RemapFilename) {
std::unique_ptr<SampleProfileReader> Reader;
if (SampleProfileReaderRawBinary::hasFormat(*B))
Reader.reset(new SampleProfileReaderRawBinary(std::move(B), C));
diff --git a/llvm/lib/Support/LockFileManager.cpp b/llvm/lib/Support/LockFileManager.cpp
index 083f8d7b37be..3169aa25ec0d 100644
--- a/llvm/lib/Support/LockFileManager.cpp
+++ b/llvm/lib/Support/LockFileManager.cpp
@@ -66,7 +66,7 @@ LockFileManager::readLockFile(StringRef LockFileName) {
StringRef Hostname;
StringRef PIDStr;
std::tie(Hostname, PIDStr) = getToken(MB.getBuffer(), " ");
- PIDStr = PIDStr.substr(PIDStr.find_first_not_of(" "));
+ PIDStr = PIDStr.substr(PIDStr.find_first_not_of(' '));
int PID;
if (!PIDStr.getAsInteger(10, PID)) {
auto Owner = std::make_pair(std::string(Hostname), PID);
diff --git a/llvm/lib/Support/raw_socket_stream.cpp b/llvm/lib/Support/raw_socket_stream.cpp
index 14e2308df4d7..549d537709bf 100644
--- a/llvm/lib/Support/raw_socket_stream.cpp
+++ b/llvm/lib/Support/raw_socket_stream.cpp
@@ -204,17 +204,26 @@ ListeningSocket::accept(std::chrono::milliseconds Timeout) {
auto Start = std::chrono::steady_clock::now();
#ifdef _WIN32
PollStatus = WSAPoll(FDs, 2, RemainingTime);
- if (PollStatus == SOCKET_ERROR) {
#else
PollStatus = ::poll(FDs, 2, RemainingTime);
+#endif
+ // If FD equals -1 then ListeningSocket::shutdown has been called and it is
+ // appropriate to return operation_canceled
+ if (FD.load() == -1)
+ return llvm::make_error<StringError>(
+ std::make_error_code(std::errc::operation_canceled),
+ "Accept canceled");
+
+#if _WIN32
+ if (PollStatus == SOCKET_ERROR) {
+#else
if (PollStatus == -1) {
#endif
- // Ignore error if caused by interupting signal
std::error_code PollErrCode = getLastSocketErrorCode();
+ // Ignore EINTR (a signal occurred before any requested event) and retry
if (PollErrCode != std::errc::interrupted)
return llvm::make_error<StringError>(PollErrCode, "FD poll failed");
}
-
if (PollStatus == 0)
return llvm::make_error<StringError>(
std::make_error_code(std::errc::timed_out),
@@ -222,13 +231,7 @@ ListeningSocket::accept(std::chrono::milliseconds Timeout) {
if (FDs[0].revents & POLLNVAL)
return llvm::make_error<StringError>(
- std::make_error_code(std::errc::bad_file_descriptor),
- "File descriptor closed by another thread");
-
- if (FDs[1].revents & POLLIN)
- return llvm::make_error<StringError>(
- std::make_error_code(std::errc::operation_canceled),
- "Accept canceled");
+ std::make_error_code(std::errc::bad_file_descriptor));
auto Stop = std::chrono::steady_clock::now();
ElapsedTime +=
diff --git a/llvm/lib/Target/AArch64/AArch64PointerAuth.cpp b/llvm/lib/Target/AArch64/AArch64PointerAuth.cpp
index abde099be382..90bf089dbebf 100644
--- a/llvm/lib/Target/AArch64/AArch64PointerAuth.cpp
+++ b/llvm/lib/Target/AArch64/AArch64PointerAuth.cpp
@@ -231,7 +231,7 @@ MachineMemOperand *createCheckMemOperand(MachineFunction &MF,
} // namespace
-MachineBasicBlock &llvm::AArch64PAuth::checkAuthenticatedRegister(
+void llvm::AArch64PAuth::checkAuthenticatedRegister(
MachineBasicBlock::iterator MBBI, AuthCheckMethod Method,
Register AuthenticatedReg, Register TmpReg, bool UseIKey, unsigned BrkImm) {
@@ -246,13 +246,13 @@ MachineBasicBlock &llvm::AArch64PAuth::checkAuthenticatedRegister(
default:
break;
case AuthCheckMethod::None:
- return MBB;
+ return;
case AuthCheckMethod::DummyLoad:
BuildMI(MBB, MBBI, DL, TII->get(AArch64::LDRWui), getWRegFromXReg(TmpReg))
.addReg(AuthenticatedReg)
.addImm(0)
.addMemOperand(createCheckMemOperand(MF, Subtarget));
- return MBB;
+ return;
}
// Control flow has to be changed, so arrange new MBBs.
@@ -287,7 +287,7 @@ MachineBasicBlock &llvm::AArch64PAuth::checkAuthenticatedRegister(
.addReg(TmpReg)
.addImm(62)
.addMBB(BreakBlock);
- return *SuccessBlock;
+ return;
case AuthCheckMethod::XPACHint:
assert(AuthenticatedReg == AArch64::LR &&
"XPACHint mode is only compatible with checking the LR register");
@@ -304,7 +304,7 @@ MachineBasicBlock &llvm::AArch64PAuth::checkAuthenticatedRegister(
BuildMI(CheckBlock, DL, TII->get(AArch64::Bcc))
.addImm(AArch64CC::NE)
.addMBB(BreakBlock);
- return *SuccessBlock;
+ return;
}
llvm_unreachable("Unknown AuthCheckMethod enum");
}
diff --git a/llvm/lib/Target/AArch64/AArch64PointerAuth.h b/llvm/lib/Target/AArch64/AArch64PointerAuth.h
index e1ceaed58abe..4ffda7478224 100644
--- a/llvm/lib/Target/AArch64/AArch64PointerAuth.h
+++ b/llvm/lib/Target/AArch64/AArch64PointerAuth.h
@@ -98,14 +98,10 @@ enum class AuthCheckMethod {
/// using an I-key or D-key and which register can be used as temporary.
/// If an explicit BRK instruction is used to generate an exception, BrkImm
/// specifies its immediate operand.
-///
-/// \returns The machine basic block containing the code that is executed
-/// after the check succeeds.
-MachineBasicBlock &checkAuthenticatedRegister(MachineBasicBlock::iterator MBBI,
- AuthCheckMethod Method,
- Register AuthenticatedReg,
- Register TmpReg, bool UseIKey,
- unsigned BrkImm);
+void checkAuthenticatedRegister(MachineBasicBlock::iterator MBBI,
+ AuthCheckMethod Method,
+ Register AuthenticatedReg, Register TmpReg,
+ bool UseIKey, unsigned BrkImm);
/// Returns the number of bytes added by checkAuthenticatedRegister.
unsigned getCheckerSizeInBytes(AuthCheckMethod Method);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
index 86f77f7b64e8..fa7492ac6cbe 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUInstructions.td
@@ -168,30 +168,20 @@ def brtarget : Operand<OtherVT>;
class HasOneUseUnaryOp<SDPatternOperator op> : PatFrag<
(ops node:$src0),
- (op $src0),
- [{ return N->hasOneUse(); }]> {
-
- let GISelPredicateCode = [{
- return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
- }];
+ (op $src0)> {
+ let HasOneUse = 1;
}
class HasOneUseBinOp<SDPatternOperator op> : PatFrag<
(ops node:$src0, node:$src1),
- (op $src0, $src1),
- [{ return N->hasOneUse(); }]> {
- let GISelPredicateCode = [{
- return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
- }];
+ (op $src0, $src1)> {
+ let HasOneUse = 1;
}
class HasOneUseTernaryOp<SDPatternOperator op> : PatFrag<
(ops node:$src0, node:$src1, node:$src2),
- (op $src0, $src1, $src2),
- [{ return N->hasOneUse(); }]> {
- let GISelPredicateCode = [{
- return MRI.hasOneNonDBGUse(MI.getOperand(0).getReg());
- }];
+ (op $src0, $src1, $src2)> {
+ let HasOneUse = 1;
}
class is_canonicalized_1<SDPatternOperator op> : PatFrag<
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index 15a4b6796880..a771b421e77a 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -4168,7 +4168,7 @@ bool AMDGPULegalizerInfo::legalizeCTLZ_ZERO_UNDEF(MachineInstr &MI,
auto ShiftAmt = B.buildConstant(S32, 32u - NumBits);
auto Extend = B.buildAnyExt(S32, {Src}).getReg(0u);
- auto Shift = B.buildLShr(S32, {Extend}, ShiftAmt);
+ auto Shift = B.buildShl(S32, Extend, ShiftAmt);
auto Ctlz = B.buildInstr(AMDGPU::G_AMDGPU_FFBH_U32, {S32}, {Shift});
B.buildTrunc(Dst, Ctlz);
MI.eraseFromParent();
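As a sanity check of the corrected shift direction: for an 8-bit source, NumBits = 8 and ShiftAmt = 24, so the value 0b0001'0110 any-extended to 32 bits and shifted left by 24 lands in bits 31..24 with its three leading zeros intact, and G_AMDGPU_FFBH_U32 returns 3, exactly ctlz_zero_undef of the original 8-bit value. A logical shift right by 24, as before, would discard the source bits instead of aligning them at the top of the register.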
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
index faf04c3c7e70..c515138d95a2 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULibCalls.cpp
@@ -22,6 +22,7 @@
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include <cmath>
@@ -1156,35 +1157,49 @@ bool AMDGPULibCalls::fold_pow(FPMathOperator *FPOp, IRBuilder<> &B,
bool AMDGPULibCalls::fold_rootn(FPMathOperator *FPOp, IRBuilder<> &B,
const FuncInfo &FInfo) {
- // skip vector function
- if (getVecSize(FInfo) != 1)
- return false;
-
Value *opr0 = FPOp->getOperand(0);
Value *opr1 = FPOp->getOperand(1);
- ConstantInt *CINT = dyn_cast<ConstantInt>(opr1);
- if (!CINT) {
+ const APInt *CINT = nullptr;
+ if (!match(opr1, m_APIntAllowPoison(CINT)))
return false;
- }
+
+ Function *Parent = B.GetInsertBlock()->getParent();
+
int ci_opr1 = (int)CINT->getSExtValue();
- if (ci_opr1 == 1) { // rootn(x, 1) = x
- LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> " << *opr0 << "\n");
+ if (ci_opr1 == 1 && !Parent->hasFnAttribute(Attribute::StrictFP)) {
+ // rootn(x, 1) = x
+ //
+ // TODO: Insert constrained canonicalize for strictfp case.
+ LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> " << *opr0 << '\n');
replaceCall(FPOp, opr0);
return true;
}
Module *M = B.GetInsertBlock()->getModule();
- if (ci_opr1 == 2) { // rootn(x, 2) = sqrt(x)
- if (FunctionCallee FPExpr =
- getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_SQRT, FInfo))) {
- LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> sqrt(" << *opr0
- << ")\n");
- Value *nval = CreateCallEx(B,FPExpr, opr0, "__rootn2sqrt");
- replaceCall(FPOp, nval);
- return true;
- }
- } else if (ci_opr1 == 3) { // rootn(x, 3) = cbrt(x)
+
+ CallInst *CI = cast<CallInst>(FPOp);
+ if (ci_opr1 == 2 &&
+ shouldReplaceLibcallWithIntrinsic(CI,
+ /*AllowMinSizeF32=*/true,
+ /*AllowF64=*/true)) {
+ // rootn(x, 2) = sqrt(x)
+ LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> sqrt(" << *opr0 << ")\n");
+
+ CallInst *NewCall = B.CreateUnaryIntrinsic(Intrinsic::sqrt, opr0, CI);
+ NewCall->takeName(CI);
+
+ // OpenCL rootn has a looser ulp of 2 requirement than sqrt, so add some
+ // metadata.
+ MDBuilder MDHelper(M->getContext());
+ MDNode *FPMD = MDHelper.createFPMath(std::max(FPOp->getFPAccuracy(), 2.0f));
+ NewCall->setMetadata(LLVMContext::MD_fpmath, FPMD);
+
+ replaceCall(CI, NewCall);
+ return true;
+ }
+
+ if (ci_opr1 == 3) { // rootn(x, 3) = cbrt(x)
if (FunctionCallee FPExpr =
getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_CBRT, FInfo))) {
LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> cbrt(" << *opr0
@@ -1200,16 +1215,36 @@ bool AMDGPULibCalls::fold_rootn(FPMathOperator *FPOp, IRBuilder<> &B,
"__rootn2div");
replaceCall(FPOp, nval);
return true;
- } else if (ci_opr1 == -2) { // rootn(x, -2) = rsqrt(x)
- if (FunctionCallee FPExpr =
- getFunction(M, AMDGPULibFunc(AMDGPULibFunc::EI_RSQRT, FInfo))) {
- LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> rsqrt(" << *opr0
- << ")\n");
- Value *nval = CreateCallEx(B,FPExpr, opr0, "__rootn2rsqrt");
- replaceCall(FPOp, nval);
- return true;
- }
}
+
+ if (ci_opr1 == -2 &&
+ shouldReplaceLibcallWithIntrinsic(CI,
+ /*AllowMinSizeF32=*/true,
+ /*AllowF64=*/true)) {
+ // rootn(x, -2) = rsqrt(x)
+
+ // The original rootn had looser ulp requirements than the resultant sqrt
+ // and fdiv.
+ MDBuilder MDHelper(M->getContext());
+ MDNode *FPMD = MDHelper.createFPMath(std::max(FPOp->getFPAccuracy(), 2.0f));
+
+ // TODO: Could handle strictfp but need to fix strict sqrt emission
+ FastMathFlags FMF = FPOp->getFastMathFlags();
+ FMF.setAllowContract(true);
+
+ CallInst *Sqrt = B.CreateUnaryIntrinsic(Intrinsic::sqrt, opr0, CI);
+ Instruction *RSqrt = cast<Instruction>(
+ B.CreateFDiv(ConstantFP::get(opr0->getType(), 1.0), Sqrt));
+ Sqrt->setFastMathFlags(FMF);
+ RSqrt->setFastMathFlags(FMF);
+ RSqrt->setMetadata(LLVMContext::MD_fpmath, FPMD);
+
+ LLVM_DEBUG(errs() << "AMDIC: " << *FPOp << " ---> rsqrt(" << *opr0
+ << ")\n");
+ replaceCall(CI, RSqrt);
+ return true;
+ }
+
return false;
}
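For reference, all of the folds above are instances of rootn(x, n) = x^(1/n): rootn(x, 1) = x, rootn(x, 2) = sqrt(x), rootn(x, 3) = cbrt(x), rootn(x, -1) = 1/x and rootn(x, -2) = 1/sqrt(x). Because OpenCL permits rootn a looser accuracy than sqrt, the rewrites that emit the llvm.sqrt intrinsic attach !fpmath metadata of at least 2 ulp (std::max(FPOp->getFPAccuracy(), 2.0f)), keeping the extra latitude the original rootn call allowed.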
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
index 84320d296a03..437e01c37c6b 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -1129,31 +1129,56 @@ InstructionCost GCNTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
int Index, VectorType *SubTp,
ArrayRef<const Value *> Args,
const Instruction *CxtI) {
+ if (!isa<FixedVectorType>(VT))
+ return BaseT::getShuffleCost(Kind, VT, Mask, CostKind, Index, SubTp);
+
Kind = improveShuffleKindFromMask(Kind, Mask, VT, Index, SubTp);
- // Treat extractsubvector as single op permutation.
- bool IsExtractSubvector = Kind == TTI::SK_ExtractSubvector;
- if (IsExtractSubvector)
- Kind = TTI::SK_PermuteSingleSrc;
-
- if (ST->hasVOP3PInsts()) {
- if (cast<FixedVectorType>(VT)->getNumElements() == 2 &&
- DL.getTypeSizeInBits(VT->getElementType()) == 16) {
- // With op_sel VOP3P instructions freely can access the low half or high
- // half of a register, so any swizzle is free.
- switch (Kind) {
- case TTI::SK_Broadcast:
- case TTI::SK_Reverse:
- case TTI::SK_PermuteSingleSrc:
+ // Larger vector widths may require additional instructions, but are
+ // typically cheaper than scalarized versions.
+ unsigned NumVectorElts = cast<FixedVectorType>(VT)->getNumElements();
+ if (ST->getGeneration() >= AMDGPUSubtarget::VOLCANIC_ISLANDS &&
+ DL.getTypeSizeInBits(VT->getElementType()) == 16) {
+ bool HasVOP3P = ST->hasVOP3PInsts();
+ unsigned RequestedElts =
+ count_if(Mask, [](int MaskElt) { return MaskElt != -1; });
+ if (RequestedElts == 0)
+ return 0;
+ switch (Kind) {
+ case TTI::SK_Broadcast:
+ case TTI::SK_Reverse:
+ case TTI::SK_PermuteSingleSrc: {
+ // With op_sel, VOP3P instructions can freely access the low half or high
+ // half of a register, so any swizzle of two elements is free.
+ if (HasVOP3P && NumVectorElts == 2)
return 0;
- default:
- break;
- }
+ unsigned NumPerms = alignTo(RequestedElts, 2) / 2;
+ // SK_Broadcast just reuses the same mask
+ unsigned NumPermMasks = Kind == TTI::SK_Broadcast ? 1 : NumPerms;
+ return NumPerms + NumPermMasks;
+ }
+ case TTI::SK_ExtractSubvector:
+ case TTI::SK_InsertSubvector: {
+ // Even-aligned accesses are free
+ if (!(Index % 2))
+ return 0;
+ // Insert/extract subvectors only require shifts / extract code to get the
+ // relevant bits
+ return alignTo(RequestedElts, 2) / 2;
+ }
+ case TTI::SK_PermuteTwoSrc:
+ case TTI::SK_Splice:
+ case TTI::SK_Select: {
+ unsigned NumPerms = alignTo(RequestedElts, 2) / 2;
+ // SK_Select just reuses the same mask
+ unsigned NumPermMasks = Kind == TTI::SK_Select ? 1 : NumPerms;
+ return NumPerms + NumPermMasks;
+ }
+
+ default:
+ break;
}
}
- // Restore optimal kind.
- if (IsExtractSubvector)
- Kind = TTI::SK_ExtractSubvector;
return BaseT::getShuffleCost(Kind, VT, Mask, CostKind, Index, SubTp);
}
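Working the new formula on a small case: reversing a <4 x i16> vector requests 4 elements, so NumPerms = alignTo(4, 2) / 2 = 2 and, since SK_Reverse cannot reuse a single mask, NumPermMasks = 2 as well, for a reported cost of 4. A broadcast over the same mask reuses one mask and costs 2 + 1 = 3, and on subtargets with VOP3P a two-element swizzle in these categories stays free, matching the old special case.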
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index d7b6941fcf81..45a16a14996e 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -957,6 +957,11 @@ const GCNSubtarget *SITargetLowering::getSubtarget() const {
return Subtarget;
}
+ArrayRef<MCPhysReg> SITargetLowering::getRoundingControlRegisters() const {
+ static const MCPhysReg RCRegs[] = {AMDGPU::MODE};
+ return RCRegs;
+}
+
//===----------------------------------------------------------------------===//
// TargetLowering queries
//===----------------------------------------------------------------------===//
@@ -12074,11 +12079,9 @@ calculateByteProvider(const SDValue &Op, unsigned Index, unsigned Depth,
return std::nullopt;
auto VecIdx = IdxOp->getZExtValue();
auto ScalarSize = Op.getScalarValueSizeInBits();
- if (ScalarSize != 32) {
+ if (ScalarSize < 32)
Index = ScalarSize == 8 ? VecIdx : VecIdx * 2 + Index;
- }
-
- return calculateSrcByte(ScalarSize == 32 ? Op : Op.getOperand(0),
+ return calculateSrcByte(ScalarSize >= 32 ? Op : Op.getOperand(0),
StartingIndex, Index);
}
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.h b/llvm/lib/Target/AMDGPU/SIISelLowering.h
index 08aa2a599163..fed73f48840f 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.h
@@ -287,6 +287,8 @@ public:
const GCNSubtarget *getSubtarget() const;
+ ArrayRef<MCPhysReg> getRoundingControlRegisters() const override;
+
bool isFPExtFoldable(const SelectionDAG &DAG, unsigned Opcode, EVT DestVT,
EVT SrcVT) const override;
diff --git a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
index 5577ce9eb128..230443313d72 100644
--- a/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
+++ b/llvm/lib/Target/AMDGPU/SIInsertWaitcnts.cpp
@@ -900,18 +900,6 @@ void WaitcntBrackets::updateByEvent(const SIInstrInfo *TII,
}
}
}
-#if 0 // TODO: check if this is handled by MUBUF code above.
- } else if (Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORD ||
- Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX2 ||
- Inst.getOpcode() == AMDGPU::BUFFER_STORE_DWORDX4) {
- MachineOperand *MO = TII->getNamedOperand(Inst, AMDGPU::OpName::data);
- unsigned OpNo;//TODO: find the OpNo for this operand;
- RegInterval Interval = getRegInterval(&Inst, MRI, TRI, OpNo);
- for (int RegNo = Interval.first; RegNo < Interval.second;
- ++RegNo) {
- setRegScore(RegNo + NUM_ALL_VGPRS, t, CurrScore);
- }
-#endif
} else /* LGKM_CNT || EXP_CNT || VS_CNT || NUM_INST_CNTS */ {
// Match the score to the destination registers.
for (unsigned I = 0, E = Inst.getNumOperands(); I != E; ++I) {
@@ -1673,59 +1661,6 @@ bool SIInsertWaitcnts::generateWaitcntInstBefore(MachineInstr &MI,
AMDGPU::SendMsg::ID_GS_DONE_PreGFX11)) {
Wait.LoadCnt = 0;
}
-#if 0 // TODO: the following blocks of logic when we have fence.
- else if (MI.getOpcode() == SC_FENCE) {
- const unsigned int group_size =
- context->shader_info->GetMaxThreadGroupSize();
- // group_size == 0 means thread group size is unknown at compile time
- const bool group_is_multi_wave =
- (group_size == 0 || group_size > target_info->GetWaveFrontSize());
- const bool fence_is_global = !((SCInstInternalMisc*)Inst)->IsGroupFence();
-
- for (unsigned int i = 0; i < Inst->NumSrcOperands(); i++) {
- SCRegType src_type = Inst->GetSrcType(i);
- switch (src_type) {
- case SCMEM_LDS:
- if (group_is_multi_wave ||
- context->OptFlagIsOn(OPT_R1100_LDSMEM_FENCE_CHICKEN_BIT)) {
- EmitWaitcnt |= ScoreBrackets->updateByWait(DS_CNT,
- ScoreBrackets->getScoreUB(DS_CNT));
- // LDS may have to wait for VMcnt after buffer load to LDS
- if (target_info->HasBufferLoadToLDS()) {
- EmitWaitcnt |= ScoreBrackets->updateByWait(LOAD_CNT,
- ScoreBrackets->getScoreUB(LOAD_CNT));
- }
- }
- break;
-
- case SCMEM_GDS:
- if (group_is_multi_wave || fence_is_global) {
- EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
- ScoreBrackets->getScoreUB(EXP_CNT));
- EmitWaitcnt |= ScoreBrackets->updateByWait(DS_CNT,
- ScoreBrackets->getScoreUB(DS_CNT));
- }
- break;
-
- case SCMEM_UAV:
- case SCMEM_TFBUF:
- case SCMEM_RING:
- case SCMEM_SCATTER:
- if (group_is_multi_wave || fence_is_global) {
- EmitWaitcnt |= ScoreBrackets->updateByWait(EXP_CNT,
- ScoreBrackets->getScoreUB(EXP_CNT));
- EmitWaitcnt |= ScoreBrackets->updateByWait(LOAD_CNT,
- ScoreBrackets->getScoreUB(LOAD_CNT));
- }
- break;
-
- case SCMEM_SCRATCH:
- default:
- break;
- }
- }
- }
-#endif
// Export & GDS instructions do not read the EXEC mask until after the export
// is granted (which can occur well after the instruction is issued).
@@ -2309,17 +2244,6 @@ bool SIInsertWaitcnts::insertWaitcntInBlock(MachineFunction &MF,
updateEventWaitcntAfter(Inst, &ScoreBrackets);
-#if 0 // TODO: implement resource type check controlled by options with ub = LB.
- // If this instruction generates a S_SETVSKIP because it is an
- // indexed resource, and we are on Tahiti, then it will also force
- // an S_WAITCNT vmcnt(0)
- if (RequireCheckResourceType(Inst, context)) {
- // Force the score to as if an S_WAITCNT vmcnt(0) is emitted.
- ScoreBrackets->setScoreLB(LOAD_CNT,
- ScoreBrackets->getScoreUB(LOAD_CNT));
- }
-#endif
-
if (ST->isPreciseMemoryEnabled() && Inst.mayLoadOrStore()) {
AMDGPU::Waitcnt Wait = WCG->getAllZeroWaitcnt(
Inst.mayStore() && !SIInstrInfo::isAtomicRet(Inst));
diff --git a/llvm/lib/Target/AMDGPU/VOP1Instructions.td b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
index b875ddc62a7a..586a4a74ec34 100644
--- a/llvm/lib/Target/AMDGPU/VOP1Instructions.td
+++ b/llvm/lib/Target/AMDGPU/VOP1Instructions.td
@@ -1431,38 +1431,12 @@ defm V_CVT_F32_BF8 : VOP1_Real_NoDstSel_SDWA_gfx9<0x55>;
defm V_CVT_PK_F32_FP8 : VOP1_Real_NoDstSel_SDWA_gfx9<0x56>;
defm V_CVT_PK_F32_BF8 : VOP1_Real_NoDstSel_SDWA_gfx9<0x57>;
-//===----------------------------------------------------------------------===//
-// GFX10
-//===----------------------------------------------------------------------===//
-
-let OtherPredicates = [isGFX10Only] in {
-def : GCNPat <
+class MovDPP8Pattern<Predicate Pred, Instruction Inst> : GCNPat <
(i32 (int_amdgcn_mov_dpp8 i32:$src, timm:$dpp8)),
- (V_MOV_B32_dpp8_gfx10 VGPR_32:$src, VGPR_32:$src,
- (as_i32timm $dpp8), (i32 DPP8Mode.FI_0))
->;
-} // End OtherPredicates = [isGFX10Only]
-
-//===----------------------------------------------------------------------===//
-// GFX11
-//===----------------------------------------------------------------------===//
-
-let OtherPredicates = [isGFX11Only] in {
-def : GCNPat <
- (i32 (int_amdgcn_mov_dpp8 i32:$src, timm:$dpp8)),
- (V_MOV_B32_dpp8_gfx11 VGPR_32:$src, VGPR_32:$src,
- (as_i32timm $dpp8), (i32 DPP8Mode.FI_0))
->;
-} // End OtherPredicates = [isGFX11Only]
-
-//===----------------------------------------------------------------------===//
-// GFX12
-//===----------------------------------------------------------------------===//
+ (Inst VGPR_32:$src, VGPR_32:$src, (as_i32timm $dpp8), (i32 DPP8Mode.FI_0))> {
+ let OtherPredicates = [Pred];
+}
-let OtherPredicates = [isGFX12Only] in {
-def : GCNPat <
- (i32 (int_amdgcn_mov_dpp8 i32:$src, timm:$dpp8)),
- (V_MOV_B32_dpp8_gfx12 VGPR_32:$src, VGPR_32:$src,
- (as_i32timm $dpp8), (i32 DPP8Mode.FI_0))
->;
-} // End OtherPredicates = [isGFX12Only]
+def : MovDPP8Pattern<isGFX10Only, V_MOV_B32_dpp8_gfx10>;
+def : MovDPP8Pattern<isGFX11Only, V_MOV_B32_dpp8_gfx11>;
+def : MovDPP8Pattern<isGFX12Only, V_MOV_B32_dpp8_gfx12>;
diff --git a/llvm/lib/Target/BPF/BPFMIChecking.cpp b/llvm/lib/Target/BPF/BPFMIChecking.cpp
index 89ac485b1675..a968950f5bfc 100644
--- a/llvm/lib/Target/BPF/BPFMIChecking.cpp
+++ b/llvm/lib/Target/BPF/BPFMIChecking.cpp
@@ -20,6 +20,7 @@
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/Support/Debug.h"
using namespace llvm;
@@ -164,11 +165,9 @@ bool BPFMIPreEmitChecking::processAtomicInsts() {
if (hasLiveDefs(MI, TRI)) {
DebugLoc Empty;
const DebugLoc &DL = MI.getDebugLoc();
- if (DL != Empty)
- report_fatal_error(Twine("line ") + std::to_string(DL.getLine()) +
- ": Invalid usage of the XADD return value", false);
- else
- report_fatal_error("Invalid usage of the XADD return value", false);
+ const Function &F = MF->getFunction();
+ F.getContext().diagnose(DiagnosticInfoUnsupported{
+ F, "Invalid usage of the XADD return value", DL});
}
}
}
diff --git a/llvm/lib/Target/LoongArch/LoongArchSubtarget.h b/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
index b87ea6e2ec32..a8752c8070aa 100644
--- a/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
+++ b/llvm/lib/Target/LoongArch/LoongArchSubtarget.h
@@ -37,6 +37,10 @@ class LoongArchSubtarget : public LoongArchGenSubtargetInfo {
#include "LoongArchGenSubtargetInfo.inc"
unsigned GRLen = 32;
+ // TODO: The default value is empirical and conservative. Override the
+ // default in initializeProperties once we support optimizing for more
+ // uarches.
+ uint8_t MaxInterleaveFactor = 2;
MVT GRLenVT = MVT::i32;
LoongArchABI::ABI TargetABI = LoongArchABI::ABI_Unknown;
LoongArchFrameLowering FrameLowering;
@@ -99,6 +103,7 @@ public:
Align getPrefFunctionAlignment() const { return PrefFunctionAlignment; }
Align getPrefLoopAlignment() const { return PrefLoopAlignment; }
unsigned getMaxBytesForAlignment() const { return MaxBytesForAlignment; }
+ unsigned getMaxInterleaveFactor() const { return MaxInterleaveFactor; }
bool enableMachineScheduler() const override { return true; }
};
} // end namespace llvm
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
index add1c60d89d2..710650acba30 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.cpp
@@ -69,6 +69,10 @@ unsigned LoongArchTTIImpl::getRegisterClassForType(bool Vector,
return LoongArchRegisterClass::GPRRC;
}
+unsigned LoongArchTTIImpl::getMaxInterleaveFactor(ElementCount VF) {
+ return ST->getMaxInterleaveFactor();
+}
+
const char *LoongArchTTIImpl::getRegisterClassName(unsigned ClassID) const {
switch (ClassID) {
case LoongArchRegisterClass::GPRRC:
diff --git a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
index 34c18163bbdb..06a03d29931d 100644
--- a/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
+++ b/llvm/lib/Target/LoongArch/LoongArchTargetTransformInfo.h
@@ -43,6 +43,7 @@ public:
TypeSize getRegisterBitWidth(TargetTransformInfo::RegisterKind K) const;
unsigned getNumberOfRegisters(unsigned ClassID) const;
unsigned getRegisterClassForType(bool Vector, Type *Ty = nullptr) const;
+ unsigned getMaxInterleaveFactor(ElementCount VF);
const char *getRegisterClassName(unsigned ClassID) const;
// TODO: Implement more hooks to provide TTI machinery for LoongArch.
diff --git a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
index ac48dc5af9d5..72e8215fffaf 100644
--- a/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
+++ b/llvm/lib/Target/PowerPC/PPCAsmPrinter.cpp
@@ -1157,12 +1157,12 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
MCSymbolRefExpr::VariantKind VK = GetVKForMO(MO);
- // If the symbol isn't toc-data then use the TOC on AIX.
// Map the global address operand to be a reference to the TOC entry we
// will synthesize later. 'TOCEntry' is a label used to reference the
// storage allocated in the TOC which contains the address of 'MOSymbol'.
- // If the toc-data attribute is used, the TOC entry contains the data
- // rather than the address of the MOSymbol.
+ // If the symbol does not have the toc-data attribute, then we create the
+ // TOC entry on AIX. If the toc-data attribute is used, the TOC entry
+ // contains the data rather than the address of the MOSymbol.
if (![](const MachineOperand &MO) {
if (!MO.isGlobal())
return false;
@@ -1170,7 +1170,6 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
const GlobalVariable *GV = dyn_cast<GlobalVariable>(MO.getGlobal());
if (!GV)
return false;
-
return GV->hasAttribute("toc-data");
}(MO)) {
MOSymbol = lookUpOrCreateTOCEntry(MOSymbol, getTOCEntryTypeForMO(MO), VK);
@@ -1301,8 +1300,10 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
unsigned Op = MI->getOpcode();
- // Change the opcode to load address for tocdata
- TmpInst.setOpcode(Op == PPC::ADDItocL8 ? PPC::ADDI8 : PPC::LA);
+ // Change the opcode to load address for toc-data.
+ // ADDItocL is only used for 32-bit toc-data on AIX and will always use LA.
+ TmpInst.setOpcode(Op == PPC::ADDItocL8 ? (IsAIX ? PPC::LA8 : PPC::ADDI8)
+ : PPC::LA);
const MachineOperand &MO = MI->getOperand(2);
assert((Op == PPC::ADDItocL8)
@@ -1316,8 +1317,7 @@ void PPCAsmPrinter::emitInstruction(const MachineInstr *MI) {
const MCExpr *Exp = MCSymbolRefExpr::create(
MOSymbol,
- Op == PPC::ADDItocL8 ? MCSymbolRefExpr::VK_PPC_TOC_LO
- : MCSymbolRefExpr::VK_PPC_L,
+ IsAIX ? MCSymbolRefExpr::VK_PPC_L : MCSymbolRefExpr::VK_PPC_TOC_LO,
OutContext);
TmpInst.getOperand(2) = MCOperand::createExpr(Exp);
diff --git a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
index 68621558e3fa..26560dc5cdeb 100644
--- a/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelDAGToDAG.cpp
@@ -6143,23 +6143,22 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
" ELF/AIX or 32-bit AIX in the following.");
// Transforms the ISD::TOC_ENTRY node for 32-bit AIX large code model mode,
- // or 64-bit medium (ELF-only), or large (ELF and AIX) code model code that
- // does not conain TOC data symbols.
- // We generate two instructions as described below. The first source
- // operand is a symbol reference. If it must be referenced via the toc
- // according to Subtarget, we generate:
+ // 64-bit medium (ELF-only), or 64-bit large (ELF and AIX) code model code
+ // that does not contain TOC data symbols. We generate two instructions as
+ // described below. The first source operand is a symbol reference. If it
+ // must be referenced via the TOC according to Subtarget, we generate:
// [32-bit AIX]
// LWZtocL(@sym, ADDIStocHA(%r2, @sym))
// [64-bit ELF/AIX]
// LDtocL(@sym, ADDIStocHA8(%x2, @sym))
- // Otherwise we generate:
+ // Otherwise for medium code model ELF we generate:
// ADDItocL8(ADDIStocHA8(%x2, @sym), @sym)
- // For large code model with TOC data symbols we generate:
+ // And finally for AIX with toc-data we generate:
// [32-bit AIX]
// ADDItocL(ADDIStocHA(%x2, @sym), @sym)
// [64-bit AIX]
- // Currently not supported.
+ // ADDItocL8(ADDIStocHA8(%x2, @sym), @sym)
SDValue GA = N->getOperand(0);
SDValue TOCbase = N->getOperand(1);
@@ -6171,12 +6170,9 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
// On AIX, if the symbol has the toc-data attribute it will be defined
// in the TOC entry, so we use an ADDItocL/ADDItocL8.
if (isAIXABI && hasTocDataAttr(GA)) {
- if (isPPC64)
- report_fatal_error(
- "64-bit large code model toc-data not yet supported");
-
- ReplaceNode(N, CurDAG->getMachineNode(PPC::ADDItocL, dl, VT,
- SDValue(Tmp, 0), GA));
+ ReplaceNode(
+ N, CurDAG->getMachineNode(isPPC64 ? PPC::ADDItocL8 : PPC::ADDItocL,
+ dl, VT, SDValue(Tmp, 0), GA));
return;
}
@@ -6191,6 +6187,7 @@ void PPCDAGToDAGISel::Select(SDNode *N) {
return;
}
+ assert(isPPC64 && "TOC_ENTRY already handled for 32-bit.");
// Build the address relative to the TOC-pointer.
ReplaceNode(N, CurDAG->getMachineNode(PPC::ADDItocL8, dl, MVT::i64,
SDValue(Tmp, 0), GA));
@@ -7777,6 +7774,10 @@ void PPCDAGToDAGISel::PeepholePPC64() {
Flags = PPCII::MO_TLSLD_LO;
break;
case PPC::ADDItocL8:
+ // Skip the following peephole optimizations for ADDItocL8 on AIX, which
+ // is used for toc-data access.
+ if (Subtarget->isAIXABI())
+ continue;
Flags = PPCII::MO_TOC_LO;
break;
}
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
index 9e56de732c58..85bbfabf5d3c 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.cpp
@@ -4438,6 +4438,12 @@ bool PPCInstrInfo::isDefMIElgibleForForwarding(MachineInstr &DefMI,
if (Opc != PPC::ADDItocL8 && Opc != PPC::ADDI && Opc != PPC::ADDI8)
return false;
+ // Skip the transformTo[NewImm|Imm]FormFedByAdd optimizations for ADDItocL8
+ // on AIX, which is used for toc-data access. TODO: Follow up to see whether
+ // these optimizations can be enabled for AIX toc-data as well.
+ if (Opc == PPC::ADDItocL8 && Subtarget.isAIX())
+ return false;
+
assert(DefMI.getNumOperands() >= 3 &&
"Add inst must have at least three operands");
RegMO = &DefMI.getOperand(1);
diff --git a/llvm/lib/Target/PowerPC/PPCInstrInfo.td b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
index 7929a781dbda..e3d6d2f094f2 100644
--- a/llvm/lib/Target/PowerPC/PPCInstrInfo.td
+++ b/llvm/lib/Target/PowerPC/PPCInstrInfo.td
@@ -3346,7 +3346,7 @@ def ADDIStocHA : PPCEmitTimePseudo<(outs gprc:$rD), (ins gprc_nor0:$reg, tocentr
"#ADDIStocHA",
[(set i32:$rD,
(PPCtoc_entry i32:$reg, tglobaladdr:$disp))]>;
-// TOC Data Transform AIX
+// TOC Data Transform on AIX
def ADDItoc : PPCEmitTimePseudo<(outs gprc:$rD), (ins tocentry32:$disp, gprc:$reg),
"#ADDItoc",
[(set i32:$rD,
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 791d364655e5..da8daa573b89 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -558,6 +558,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
case TargetOpcode::G_PTRTOINT:
case TargetOpcode::G_INTTOPTR:
case TargetOpcode::G_TRUNC:
+ case TargetOpcode::G_FREEZE:
return selectCopy(MI, MRI);
case TargetOpcode::G_CONSTANT: {
Register DstReg = MI.getOperand(0).getReg();
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index adc68e9ee4a8..c73fe2c6cecb 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -227,7 +227,8 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
ConstantActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen);
// TODO: transform illegal vector types into legal vector type
- getActionDefinitionsBuilder({G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER})
+ getActionDefinitionsBuilder(
+ {G_IMPLICIT_DEF, G_CONSTANT_FOLD_BARRIER, G_FREEZE})
.legalFor({s32, sXLen, p0})
.legalIf(typeIsLegalBoolVec(0, BoolVecTys, ST))
.legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
diff --git a/llvm/lib/Target/RISCV/RISCVFeatures.td b/llvm/lib/Target/RISCV/RISCVFeatures.td
index 89e1214f469d..b099496d1838 100644
--- a/llvm/lib/Target/RISCV/RISCVFeatures.td
+++ b/llvm/lib/Target/RISCV/RISCVFeatures.td
@@ -211,8 +211,8 @@ def FeatureStdExtZa128rs : RISCVExtension<"za128rs", 1, 0,
"'Za128rs' (Reservation Set Size of at Most 128 Bytes)">;
def FeatureStdExtZaamo
- : RISCVExperimentalExtension<"zaamo", 0, 2,
- "'Zaamo' (Atomic Memory Operations)">;
+ : RISCVExtension<"zaamo", 1, 0,
+ "'Zaamo' (Atomic Memory Operations)">;
def HasStdExtAOrZaamo
: Predicate<"Subtarget->hasStdExtA() || Subtarget->hasStdExtZaamo()">,
AssemblerPredicate<(any_of FeatureStdExtA, FeatureStdExtZaamo),
@@ -242,8 +242,8 @@ def HasStdExtZalasr : Predicate<"Subtarget->hasStdExtZalasr()">,
"'Zalasr' (Load-Acquire and Store-Release Instructions)">;
def FeatureStdExtZalrsc
- : RISCVExperimentalExtension<"zalrsc", 0, 2,
- "'Zalrsc' (Load-Reserved/Store-Conditional)">;
+ : RISCVExtension<"zalrsc", 1, 0,
+ "'Zalrsc' (Load-Reserved/Store-Conditional)">;
def HasStdExtAOrZalrsc
: Predicate<"Subtarget->hasStdExtA() || Subtarget->hasStdExtZalrsc()">,
AssemblerPredicate<(any_of FeatureStdExtA, FeatureStdExtZalrsc),
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 8d9b0f2acc5f..05859a1f4898 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -844,8 +844,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
VT, Custom);
setOperationAction({ISD::FP_TO_SINT_SAT, ISD::FP_TO_UINT_SAT}, VT,
Custom);
- setOperationAction({ISD::AVGFLOORU, ISD::AVGCEILU, ISD::SADDSAT,
- ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT},
+ setOperationAction({ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS,
+ ISD::AVGCEILU, ISD::SADDSAT, ISD::UADDSAT,
+ ISD::SSUBSAT, ISD::USUBSAT},
VT, Legal);
// Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
@@ -1237,8 +1238,9 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
if (VT.getVectorElementType() != MVT::i64 || Subtarget.hasStdExtV())
setOperationAction({ISD::MULHS, ISD::MULHU}, VT, Custom);
- setOperationAction({ISD::AVGFLOORU, ISD::AVGCEILU, ISD::SADDSAT,
- ISD::UADDSAT, ISD::SSUBSAT, ISD::USUBSAT},
+ setOperationAction({ISD::AVGFLOORS, ISD::AVGFLOORU, ISD::AVGCEILS,
+ ISD::AVGCEILU, ISD::SADDSAT, ISD::UADDSAT,
+ ISD::SSUBSAT, ISD::USUBSAT},
VT, Custom);
setOperationAction(ISD::VSELECT, VT, Custom);
@@ -1917,7 +1919,7 @@ bool RISCVTargetLowering::hasAndNotCompare(SDValue Y) const {
return false;
return (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbkb()) &&
- !isa<ConstantSDNode>(Y);
+ (!isa<ConstantSDNode>(Y) || cast<ConstantSDNode>(Y)->isOpaque());
}
bool RISCVTargetLowering::hasBitTest(SDValue X, SDValue Y) const {
@@ -5841,7 +5843,9 @@ static unsigned getRISCVVLOp(SDValue Op) {
OP_CASE(UADDSAT)
OP_CASE(SSUBSAT)
OP_CASE(USUBSAT)
+ OP_CASE(AVGFLOORS)
OP_CASE(AVGFLOORU)
+ OP_CASE(AVGCEILS)
OP_CASE(AVGCEILU)
OP_CASE(FADD)
OP_CASE(FSUB)
@@ -5956,7 +5960,7 @@ static bool hasMergeOp(unsigned Opcode) {
Opcode <= RISCVISD::LAST_RISCV_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
static_assert(RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP ==
- 126 &&
+ 128 &&
RISCVISD::LAST_RISCV_STRICTFP_OPCODE -
ISD::FIRST_TARGET_STRICTFP_OPCODE ==
21 &&
@@ -5982,7 +5986,7 @@ static bool hasMaskOp(unsigned Opcode) {
Opcode <= RISCVISD::LAST_RISCV_STRICTFP_OPCODE &&
"not a RISC-V target specific op");
static_assert(RISCVISD::LAST_VL_VECTOR_OP - RISCVISD::FIRST_VL_VECTOR_OP ==
- 126 &&
+ 128 &&
RISCVISD::LAST_RISCV_STRICTFP_OPCODE -
ISD::FIRST_TARGET_STRICTFP_OPCODE ==
21 &&
@@ -6882,7 +6886,9 @@ SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
!Subtarget.hasVInstructionsF16()))
return SplitVectorOp(Op, DAG);
[[fallthrough]];
+ case ISD::AVGFLOORS:
case ISD::AVGFLOORU:
+ case ISD::AVGCEILS:
case ISD::AVGCEILU:
case ISD::SMIN:
case ISD::SMAX:
@@ -19958,7 +19964,9 @@ const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(UDIV_VL)
NODE_NAME_CASE(UREM_VL)
NODE_NAME_CASE(XOR_VL)
+ NODE_NAME_CASE(AVGFLOORS_VL)
NODE_NAME_CASE(AVGFLOORU_VL)
+ NODE_NAME_CASE(AVGCEILS_VL)
NODE_NAME_CASE(AVGCEILU_VL)
NODE_NAME_CASE(SADDSAT_VL)
NODE_NAME_CASE(UADDSAT_VL)
@@ -21435,7 +21443,8 @@ bool RISCVTargetLowering::fallBackToDAGISel(const Instruction &Inst) const {
if (Op == Instruction::Add || Op == Instruction::Sub ||
Op == Instruction::And || Op == Instruction::Or ||
Op == Instruction::Xor || Op == Instruction::InsertElement ||
- Op == Instruction::ShuffleVector || Op == Instruction::Load)
+ Op == Instruction::ShuffleVector || Op == Instruction::Load ||
+ Op == Instruction::Freeze)
return false;
if (Inst.getType()->isScalableTy())
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index 1efc54566b4b..856ce06ba1c4 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -264,8 +264,12 @@ enum NodeType : unsigned {
SSUBSAT_VL,
USUBSAT_VL,
+ // Averaging adds of signed integers.
+ AVGFLOORS_VL,
// Averaging adds of unsigned integers.
AVGFLOORU_VL,
+ // Rounding averaging adds of signed integers.
+ AVGCEILS_VL,
// Rounding averaging adds of unsigned integers.
AVGCEILU_VL,
@@ -959,7 +963,6 @@ private:
SDValue lowerFixedLengthVectorSelectToRVV(SDValue Op,
SelectionDAG &DAG) const;
SDValue lowerToScalableOp(SDValue Op, SelectionDAG &DAG) const;
- SDValue lowerUnsignedAvgFloor(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerIS_FPCLASS(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerVPOp(SDValue Op, SelectionDAG &DAG) const;
SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG) const;
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 324ce5cb5ed7..c0b2a695b8ea 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -215,7 +215,11 @@ struct DemandedFields {
// than 64.
SEWNone = 0 // We don't need to preserve SEW at all.
} SEW = SEWNone;
- bool LMUL = false;
+ enum : uint8_t {
+ LMULEqual = 2, // The exact value of LMUL needs to be preserved.
+ LMULLessThanOrEqualToM1 = 1, // We can use any LMUL <= M1.
+ LMULNone = 0 // We don't need to preserve LMUL at all.
+ } LMUL = LMULNone;
bool SEWLMULRatio = false;
bool TailPolicy = false;
bool MaskPolicy = false;
@@ -233,7 +237,7 @@ struct DemandedFields {
// Mark all VTYPE subfields and properties as demanded
void demandVTYPE() {
SEW = SEWEqual;
- LMUL = true;
+ LMUL = LMULEqual;
SEWLMULRatio = true;
TailPolicy = true;
MaskPolicy = true;
@@ -250,7 +254,7 @@ struct DemandedFields {
VLAny |= B.VLAny;
VLZeroness |= B.VLZeroness;
SEW = std::max(SEW, B.SEW);
- LMUL |= B.LMUL;
+ LMUL = std::max(LMUL, B.LMUL);
SEWLMULRatio |= B.SEWLMULRatio;
TailPolicy |= B.TailPolicy;
MaskPolicy |= B.MaskPolicy;
@@ -284,7 +288,19 @@ struct DemandedFields {
break;
};
OS << ", ";
- OS << "LMUL=" << LMUL << ", ";
+ OS << "LMUL=";
+ switch (LMUL) {
+ case LMULEqual:
+ OS << "LMULEqual";
+ break;
+ case LMULLessThanOrEqualToM1:
+ OS << "LMULLessThanOrEqualToM1";
+ break;
+ case LMULNone:
+ OS << "LMULNone";
+ break;
+ };
+ OS << ", ";
OS << "SEWLMULRatio=" << SEWLMULRatio << ", ";
OS << "TailPolicy=" << TailPolicy << ", ";
OS << "MaskPolicy=" << MaskPolicy;
@@ -301,6 +317,11 @@ inline raw_ostream &operator<<(raw_ostream &OS, const DemandedFields &DF) {
}
#endif
+static bool isLMUL1OrSmaller(RISCVII::VLMUL LMUL) {
+ auto [LMul, Fractional] = RISCVVType::decodeVLMUL(LMUL);
+ return Fractional || LMul == 1;
+}
+
/// Return true if moving from CurVType to NewVType is
/// indistinguishable from the perspective of an instruction (or set
/// of instructions) which use only the Used subfields and properties.
@@ -324,9 +345,18 @@ static bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType,
break;
}
- if (Used.LMUL &&
- RISCVVType::getVLMUL(CurVType) != RISCVVType::getVLMUL(NewVType))
- return false;
+ switch (Used.LMUL) {
+ case DemandedFields::LMULNone:
+ break;
+ case DemandedFields::LMULEqual:
+ if (RISCVVType::getVLMUL(CurVType) != RISCVVType::getVLMUL(NewVType))
+ return false;
+ break;
+ case DemandedFields::LMULLessThanOrEqualToM1:
+ if (!isLMUL1OrSmaller(RISCVVType::getVLMUL(NewVType)))
+ return false;
+ break;
+ }
if (Used.SEWLMULRatio) {
auto Ratio1 = RISCVVType::getSEWLMULRatio(RISCVVType::getSEW(CurVType),
@@ -348,10 +378,10 @@ static bool areCompatibleVTYPEs(uint64_t CurVType, uint64_t NewVType,
/// Return the fields and properties demanded by the provided instruction.
DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
- // Warning: This function has to work on both the lowered (i.e. post
- // emitVSETVLIs) and pre-lowering forms. The main implication of this is
- // that it can't use the value of a SEW, VL, or Policy operand as they might
- // be stale after lowering.
+ // This function works in RISCVCoalesceVSETVLI too. We can still use the value
+ // of a SEW, VL, or Policy operand even though it might not be the exact value
+ // in the VL or VTYPE, since we only care about what the instruction
+ // originally demanded.
// Most instructions don't use any of these subfields.
DemandedFields Res;
@@ -382,7 +412,7 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// in the opcode. This is asserted when constructing the VSETVLIInfo.
if (getEEWForLoadStore(MI)) {
Res.SEW = DemandedFields::SEWNone;
- Res.LMUL = false;
+ Res.LMUL = DemandedFields::LMULNone;
}
// Store instructions don't use the policy fields.
@@ -397,12 +427,12 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// * The policy bits can probably be ignored..
if (isMaskRegOp(MI)) {
Res.SEW = DemandedFields::SEWNone;
- Res.LMUL = false;
+ Res.LMUL = DemandedFields::LMULNone;
}
// For vmv.s.x and vfmv.s.f, there are only two behaviors, VL = 0 and VL > 0.
if (isScalarInsertInstr(MI)) {
- Res.LMUL = false;
+ Res.LMUL = DemandedFields::LMULNone;
Res.SEWLMULRatio = false;
Res.VLAny = false;
// For vmv.s.x and vfmv.s.f, if the merge operand is *undefined*, we don't
@@ -423,12 +453,49 @@ DemandedFields getDemanded(const MachineInstr &MI, const RISCVSubtarget *ST) {
// vmv.x.s, and vmv.f.s are unconditional and ignore everything except SEW.
if (isScalarExtractInstr(MI)) {
assert(!RISCVII::hasVLOp(TSFlags));
- Res.LMUL = false;
+ Res.LMUL = DemandedFields::LMULNone;
Res.SEWLMULRatio = false;
Res.TailPolicy = false;
Res.MaskPolicy = false;
}
+ if (RISCVII::hasVLOp(MI.getDesc().TSFlags)) {
+ const MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
+ // A slidedown/slideup with an *undefined* merge op can freely clobber
+ // elements not copied from the source vector (e.g. masked off, tail, or
+ // slideup's prefix). Notes:
+ // * We can't modify SEW here since the slide amount is in units of SEW.
+ // * VL=1 is special only because we have existing support for zero vs
+ // non-zero VL. We could generalize this if we had a VL > C predicate.
+ // * The LMUL1 restriction is for machines whose latency may depend on VL.
+ // * As above, this is only legal for tail "undefined" not "agnostic".
+ if (isVSlideInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
+ hasUndefinedMergeOp(MI)) {
+ Res.VLAny = false;
+ Res.VLZeroness = true;
+ Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
+ Res.TailPolicy = false;
+ }
+
+ // A tail-undefined vmv.v.i/x or vfmv.v.f with VL=1 can be treated
+ // semantically the same as vmv.s.x. This is particularly useful since we
+ // don't have an immediate form of vmv.s.x, and thus frequently use vmv.v.i
+ // in its place. Since a splat is not constant time in LMUL, we do need to
+ // be careful not to increase the number of active vector registers (unlike
+ // for vmv.s.x).
+ if (isScalarSplatInstr(MI) && VLOp.isImm() && VLOp.getImm() == 1 &&
+ hasUndefinedMergeOp(MI)) {
+ Res.LMUL = DemandedFields::LMULLessThanOrEqualToM1;
+ Res.SEWLMULRatio = false;
+ Res.VLAny = false;
+ if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
+ Res.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
+ else
+ Res.SEW = DemandedFields::SEWGreaterThanOrEqual;
+ Res.TailPolicy = false;
+ }
+ }
+
return Res;
}
@@ -1107,11 +1174,6 @@ void RISCVInsertVSETVLI::insertVSETVLI(MachineBasicBlock &MBB,
LIS->getMBBStartIdx(&MBB), LIS->getInstructionIndex(*MI).getRegSlot());
}
-static bool isLMUL1OrSmaller(RISCVII::VLMUL LMUL) {
- auto [LMul, Fractional] = RISCVVType::decodeVLMUL(LMUL);
- return Fractional || LMul == 1;
-}
-
/// Return true if a VSETVLI is required to transition from CurInfo to Require
/// before MI.
bool RISCVInsertVSETVLI::needVSETVLI(const MachineInstr &MI,
@@ -1124,40 +1186,6 @@ bool RISCVInsertVSETVLI::needVSETVLI(const MachineInstr &MI,
DemandedFields Used = getDemanded(MI, ST);
- // A slidedown/slideup with an *undefined* merge op can freely clobber
- // elements not copied from the source vector (e.g. masked off, tail, or
- // slideup's prefix). Notes:
- // * We can't modify SEW here since the slide amount is in units of SEW.
- // * VL=1 is special only because we have existing support for zero vs
- // non-zero VL. We could generalize this if we had a VL > C predicate.
- // * The LMUL1 restriction is for machines whose latency may depend on VL.
- // * As above, this is only legal for tail "undefined" not "agnostic".
- if (isVSlideInstr(MI) && Require.hasAVLImm() && Require.getAVLImm() == 1 &&
- isLMUL1OrSmaller(CurInfo.getVLMUL()) && hasUndefinedMergeOp(MI)) {
- Used.VLAny = false;
- Used.VLZeroness = true;
- Used.LMUL = false;
- Used.TailPolicy = false;
- }
-
- // A tail undefined vmv.v.i/x or vfmv.v.f with VL=1 can be treated in the same
- // semantically as vmv.s.x. This is particularly useful since we don't have an
- // immediate form of vmv.s.x, and thus frequently use vmv.v.i in it's place.
- // Since a splat is non-constant time in LMUL, we do need to be careful to not
- // increase the number of active vector registers (unlike for vmv.s.x.)
- if (isScalarSplatInstr(MI) && Require.hasAVLImm() &&
- Require.getAVLImm() == 1 && isLMUL1OrSmaller(CurInfo.getVLMUL()) &&
- hasUndefinedMergeOp(MI)) {
- Used.LMUL = false;
- Used.SEWLMULRatio = false;
- Used.VLAny = false;
- if (isFloatScalarMoveOrScalarSplatInstr(MI) && !ST->hasVInstructionsF64())
- Used.SEW = DemandedFields::SEWGreaterThanOrEqualAndLessThan64;
- else
- Used.SEW = DemandedFields::SEWGreaterThanOrEqual;
- Used.TailPolicy = false;
- }
-
if (CurInfo.isCompatible(Used, Require, LIS))
return false;
@@ -1189,7 +1217,7 @@ static VSETVLIInfo adjustIncoming(VSETVLIInfo PrevInfo, VSETVLIInfo NewInfo,
if (auto NewVLMul = RISCVVType::getSameRatioLMUL(
PrevInfo.getSEW(), PrevInfo.getVLMUL(), Info.getSEW()))
Info.setVLMul(*NewVLMul);
- Demanded.LMUL = true;
+ Demanded.LMUL = DemandedFields::LMULEqual;
}
return Info;
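The three LMUL demand levels form a small lattice in which a larger value is a stricter requirement, so taking std::max when two DemandedFields are unioned always keeps the stronger demand. A minimal standalone sketch of that merge, with the enum values copied from the hunk above:

    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    // Same ordering as DemandedFields::LMUL: higher value = stricter demand.
    enum LMULDemand : uint8_t {
      LMULNone = 0,                // LMUL does not matter at all
      LMULLessThanOrEqualToM1 = 1, // any LMUL <= M1 is acceptable
      LMULEqual = 2                // the exact LMUL must be preserved
    };

    int main() {
      // e.g. a VL=1 slide (needs LMUL <= M1) unioned with an instruction
      // that demands the exact LMUL: the result must keep the stricter one.
      LMULDemand A = LMULLessThanOrEqualToM1;
      LMULDemand B = LMULEqual;
      LMULDemand Merged = std::max(A, B);
      assert(Merged == LMULEqual);
      return Merged == LMULEqual ? 0 : 1;
    }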
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
index e68fb42ece9f..0bbf71519953 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -975,11 +975,14 @@ multiclass VNCLP_IV_V_X_I<string opcodestr, bits<6> funct6> {
SchedUnaryMC<"WriteVNClipI", "ReadVNClipV">;
}
-multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6> {
+multiclass VSLD_IV_X_I<string opcodestr, bits<6> funct6, bit slidesUp> {
+ // Note: In the future, if VISlideI is also split into VSlideUpI and
+ // VSlideDownI, it'll probably be better to use two separate multiclasses.
+ defvar WriteSlideX = !if(slidesUp, "WriteVSlideUpX", "WriteVSlideDownX");
def X : VALUVX<funct6, OPIVX, opcodestr # ".vx">,
- SchedBinaryMC<"WriteVISlideX", "ReadVISlideV", "ReadVISlideX">;
+ SchedBinaryMC<WriteSlideX, "ReadVISlideV", "ReadVISlideX">;
def I : VALUVI<funct6, opcodestr # ".vi", uimm5>,
- SchedUnaryMC<"WriteVISlideI", "ReadVISlideV">;
+ SchedUnaryMC<"WriteVSlideI", "ReadVISlideV">;
}
multiclass VSLD1_MV_X<string opcodestr, bits<6> funct6> {
@@ -1658,10 +1661,10 @@ def VFMV_S_F : RVInstV2<0b010000, 0b00000, OPFVF, (outs VR:$vd_wb),
let Predicates = [HasVInstructions] in {
// Vector Slide Instructions
let Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp in {
-defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110>;
+defm VSLIDEUP_V : VSLD_IV_X_I<"vslideup", 0b001110, /*slidesUp=*/true>;
defm VSLIDE1UP_V : VSLD1_MV_X<"vslide1up", 0b001110>;
} // Constraints = "@earlyclobber $vd", RVVConstraint = SlideUp
-defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111>;
+defm VSLIDEDOWN_V : VSLD_IV_X_I<"vslidedown", 0b001111, /*slidesUp=*/false>;
defm VSLIDE1DOWN_V : VSLD1_MV_X<"vslide1down", 0b001111>;
} // Predicates = [HasVInstructions]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 317a6d7d4c52..8bf0f25d496a 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3380,14 +3380,16 @@ multiclass VPseudoVMAC_VV_VF_AAXA_RM<string Constraint = ""> {
}
}
-multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, string Constraint = ""> {
+multiclass VPseudoVSLD_VX_VI<Operand ImmType = simm5, bit slidesUp = false,
+ string Constraint = ""> {
+ defvar WriteSlideX = !if(slidesUp, "WriteVSlideUpX", "WriteVSlideDownX");
foreach m = MxList in {
defvar mx = m.MX;
defm "" : VPseudoVSLDV_VX<m, Constraint>,
- SchedTernary<"WriteVISlideX", "ReadVISlideV", "ReadVISlideV",
+ SchedTernary<WriteSlideX, "ReadVISlideV", "ReadVISlideV",
"ReadVISlideX", mx>;
defm "" : VPseudoVSLDV_VI<ImmType, m, Constraint>,
- SchedBinary<"WriteVISlideI", "ReadVISlideV", "ReadVISlideV", mx>;
+ SchedBinary<"WriteVSlideI", "ReadVISlideV", "ReadVISlideV", mx>;
}
}
@@ -6861,8 +6863,8 @@ let mayLoad = 0, mayStore = 0, hasSideEffects = 0 in {
// 16.3. Vector Slide Instructions
//===----------------------------------------------------------------------===//
let Predicates = [HasVInstructions] in {
- defm PseudoVSLIDEUP : VPseudoVSLD_VX_VI<uimm5, "@earlyclobber $rd">;
- defm PseudoVSLIDEDOWN : VPseudoVSLD_VX_VI<uimm5>;
+ defm PseudoVSLIDEUP : VPseudoVSLD_VX_VI<uimm5, /*slidesUp=*/true, "@earlyclobber $rd">;
+ defm PseudoVSLIDEDOWN : VPseudoVSLD_VX_VI<uimm5, /*slidesUp=*/false>;
defm PseudoVSLIDE1UP : VPseudoVSLD1_VX<"@earlyclobber $rd">;
defm PseudoVSLIDE1DOWN : VPseudoVSLD1_VX;
} // Predicates = [HasVInstructions]
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
index 714f8cff7b63..66df24f2a458 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -881,17 +881,17 @@ multiclass VPatMultiplyAddSDNode_VV_VX<SDNode op, string instruction_name> {
}
}
-multiclass VPatAVGADD_VV_VX_RM<SDNode vop, int vxrm> {
+multiclass VPatAVGADD_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2)),
- (!cast<Instruction>("PseudoVAADDU_VV_"#vti.LMul.MX)
+ (!cast<Instruction>("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX)
(vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, vti.RegClass:$rs2,
vxrm, vti.AVL, vti.Log2SEW, TA_MA)>;
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector (SplatPat (XLenVT GPR:$rs2)))),
- (!cast<Instruction>("PseudoVAADDU_VX_"#vti.LMul.MX)
+ (!cast<Instruction>("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX)
(vti.Vector (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
vxrm, vti.AVL, vti.Log2SEW, TA_MA)>;
}
@@ -1163,8 +1163,10 @@ defm : VPatBinarySDNode_VV_VX<ssubsat, "PseudoVSSUB">;
defm : VPatBinarySDNode_VV_VX<usubsat, "PseudoVSSUBU">;
// 12.2. Vector Single-Width Averaging Add and Subtract
-defm : VPatAVGADD_VV_VX_RM<avgflooru, 0b10>;
-defm : VPatAVGADD_VV_VX_RM<avgceilu, 0b00>;
+defm : VPatAVGADD_VV_VX_RM<avgfloors, 0b10>;
+defm : VPatAVGADD_VV_VX_RM<avgflooru, 0b10, suffix = "U">;
+defm : VPatAVGADD_VV_VX_RM<avgceils, 0b00>;
+defm : VPatAVGADD_VV_VX_RM<avgceilu, 0b00, suffix = "U">;
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
multiclass VPatTruncSatClipSDNode<VTypeInfo vti, VTypeInfo wti> {
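For reference, the vxrm immediates passed above select the fixed-point rounding mode used by vaadd/vaaddu: 0b10 is rdn (round down), which truncates the carried-out bit and yields the flooring average, while 0b00 is rnu (round to nearest up), which adds it back and yields the ceiling average. The signed nodes select the PseudoVAADD pseudos and the unsigned nodes PseudoVAADDU via the new suffix parameter; the rounding modes themselves are unchanged.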
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index e10b8bf2767b..8e8f86336d11 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -111,7 +111,9 @@ def riscv_ctlz_vl : SDNode<"RISCVISD::CTLZ_VL", SDT_RISCVIntUnOp_VL>
def riscv_cttz_vl : SDNode<"RISCVISD::CTTZ_VL", SDT_RISCVIntUnOp_VL>;
def riscv_ctpop_vl : SDNode<"RISCVISD::CTPOP_VL", SDT_RISCVIntUnOp_VL>;
+def riscv_avgfloors_vl : SDNode<"RISCVISD::AVGFLOORS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_avgflooru_vl : SDNode<"RISCVISD::AVGFLOORU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
+def riscv_avgceils_vl : SDNode<"RISCVISD::AVGCEILS_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_avgceilu_vl : SDNode<"RISCVISD::AVGCEILU_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_saddsat_vl : SDNode<"RISCVISD::SADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
def riscv_uaddsat_vl : SDNode<"RISCVISD::UADDSAT_VL", SDT_RISCVIntBinOp_VL, [SDNPCommutative]>;
@@ -2073,19 +2075,19 @@ multiclass VPatSlide1VL_VF<SDNode vop, string instruction_name> {
}
}
-multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm> {
+multiclass VPatAVGADDVL_VV_VX_RM<SDNode vop, int vxrm, string suffix = ""> {
foreach vti = AllIntegerVectors in {
let Predicates = GetVTypePredicates<vti>.Predicates in {
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector vti.RegClass:$rs2),
vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
- (!cast<Instruction>("PseudoVAADDU_VV_"#vti.LMul.MX#"_MASK")
+ (!cast<Instruction>("PseudoVAADD"#suffix#"_VV_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs1, vti.RegClass:$rs2,
(vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
def : Pat<(vop (vti.Vector vti.RegClass:$rs1),
(vti.Vector (SplatPat (XLenVT GPR:$rs2))),
vti.RegClass:$merge, (vti.Mask V0), VLOpFrag),
- (!cast<Instruction>("PseudoVAADDU_VX_"#vti.LMul.MX#"_MASK")
+ (!cast<Instruction>("PseudoVAADD"#suffix#"_VX_"#vti.LMul.MX#"_MASK")
vti.RegClass:$merge, vti.RegClass:$rs1, GPR:$rs2,
(vti.Mask V0), vxrm, GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
}
@@ -2369,8 +2371,10 @@ defm : VPatBinaryVL_VV_VX<riscv_ssubsat_vl, "PseudoVSSUB">;
defm : VPatBinaryVL_VV_VX<riscv_usubsat_vl, "PseudoVSSUBU">;
// 12.2. Vector Single-Width Averaging Add and Subtract
-defm : VPatAVGADDVL_VV_VX_RM<riscv_avgflooru_vl, 0b10>;
-defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceilu_vl, 0b00>;
+defm : VPatAVGADDVL_VV_VX_RM<riscv_avgfloors_vl, 0b10>;
+defm : VPatAVGADDVL_VV_VX_RM<riscv_avgflooru_vl, 0b10, suffix="U">;
+defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceils_vl, 0b00>;
+defm : VPatAVGADDVL_VV_VX_RM<riscv_avgceilu_vl, 0b00, suffix="U">;
// 12.5. Vector Narrowing Fixed-Point Clip Instructions
multiclass VPatTruncSatClipVL<VTypeInfo vti, VTypeInfo wti> {
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
index e67da839bdb8..83fb75727bbe 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFive7.td
@@ -937,10 +937,11 @@ foreach mx = SchedMxList in {
defvar Cycles = SiFive7GetCyclesDefault<mx>.c;
defvar IsWorstCase = SiFive7IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = 4, AcquireAtCycles = [0, 1], ReleaseAtCycles = [1, !add(1, Cycles)] in {
- defm "" : LMULWriteResMX<"WriteVISlideX", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVISlideI", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVISlide1X", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
- defm "" : LMULWriteResMX<"WriteVFSlide1F", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideUpX", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideDownX", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideI", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVISlide1X", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVFSlide1F", [SiFive7VCQ, SiFive7VA], mx, IsWorstCase>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
index 6ba299385f07..07d72b61862d 100644
--- a/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
+++ b/llvm/lib/Target/RISCV/RISCVSchedSiFiveP600.td
@@ -669,7 +669,7 @@ foreach mx = SchedMxList in {
defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = 2, ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVISlideI", [SiFiveP600VEXQ0], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideI", [SiFiveP600VEXQ0], mx, IsWorstCase>;
}
let Latency = 1, ReleaseAtCycles = [LMulLat] in {
defm "" : LMULWriteResMX<"WriteVISlide1X", [SiFiveP600VEXQ0], mx, IsWorstCase>;
@@ -679,7 +679,8 @@ foreach mx = SchedMxList in {
foreach mx = ["MF8", "MF4", "MF2", "M1"] in {
defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = 2, ReleaseAtCycles = [1] in {
- defm "" : LMULWriteResMX<"WriteVISlideX", [SiFiveP600VEXQ0], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideUpX", [SiFiveP600VEXQ0], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideDownX", [SiFiveP600VEXQ0], mx, IsWorstCase>;
}
}
@@ -688,7 +689,8 @@ foreach mx = ["M8", "M4", "M2"] in {
defvar LMulLat = SiFiveP600GetLMulCycles<mx>.c;
defvar IsWorstCase = SiFiveP600IsWorstCaseMX<mx, SchedMxList>.c;
let Latency = !add(4, LMulLat), ReleaseAtCycles = [LMulLat] in {
- defm "" : LMULWriteResMX<"WriteVISlideX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideUpX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
+ defm "" : LMULWriteResMX<"WriteVSlideDownX", [SiFiveP600VEXQ1], mx, IsWorstCase>;
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVScheduleV.td b/llvm/lib/Target/RISCV/RISCVScheduleV.td
index 5be06d4c3f7e..e4524185991e 100644
--- a/llvm/lib/Target/RISCV/RISCVScheduleV.td
+++ b/llvm/lib/Target/RISCV/RISCVScheduleV.td
@@ -514,8 +514,9 @@ def WriteVMovXS : SchedWrite;
def WriteVMovSF : SchedWrite;
def WriteVMovFS : SchedWrite;
// 16.3. Vector Slide Instructions
-defm "" : LMULSchedWrites<"WriteVISlideX">;
-defm "" : LMULSchedWrites<"WriteVISlideI">;
+defm "" : LMULSchedWrites<"WriteVSlideUpX">;
+defm "" : LMULSchedWrites<"WriteVSlideDownX">;
+defm "" : LMULSchedWrites<"WriteVSlideI">;
defm "" : LMULSchedWrites<"WriteVISlide1X">;
defm "" : LMULSchedWrites<"WriteVFSlide1F">;
// 16.4. Vector Register Gather Instructions
@@ -949,8 +950,9 @@ def : WriteRes<WriteVMovSX, []>;
def : WriteRes<WriteVMovXS, []>;
def : WriteRes<WriteVMovSF, []>;
def : WriteRes<WriteVMovFS, []>;
-defm "" : LMULWriteRes<"WriteVISlideX", []>;
-defm "" : LMULWriteRes<"WriteVISlideI", []>;
+defm "" : LMULWriteRes<"WriteVSlideUpX", []>;
+defm "" : LMULWriteRes<"WriteVSlideDownX", []>;
+defm "" : LMULWriteRes<"WriteVSlideI", []>;
defm "" : LMULWriteRes<"WriteVISlide1X", []>;
defm "" : LMULWriteRes<"WriteVFSlide1F", []>;
defm "" : LMULSEWWriteRes<"WriteVRGatherVV", []>;
diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 5aab138dae40..d9f8222669ca 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -96,6 +96,11 @@ static cl::opt<bool> EnableMISchedLoadClustering(
cl::desc("Enable load clustering in the machine scheduler"),
cl::init(false));
+static cl::opt<bool> EnableVSETVLIAfterRVVRegAlloc(
+ "riscv-vsetvl-after-rvv-regalloc", cl::Hidden,
+ cl::desc("Insert vsetvls after vector register allocation"),
+ cl::init(true));
+
extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeRISCVTarget() {
RegisterTargetMachine<RISCVTargetMachine> X(getTheRISCV32Target());
RegisterTargetMachine<RISCVTargetMachine> Y(getTheRISCV64Target());
@@ -389,6 +394,8 @@ FunctionPass *RISCVPassConfig::createRVVRegAllocPass(bool Optimized) {
bool RISCVPassConfig::addRegAssignAndRewriteFast() {
addPass(createRVVRegAllocPass(false));
+ if (EnableVSETVLIAfterRVVRegAlloc)
+ addPass(createRISCVInsertVSETVLIPass());
addPass(createRISCVCoalesceVSETVLIPass());
if (TM->getOptLevel() != CodeGenOptLevel::None &&
EnableRISCVDeadRegisterElimination)
@@ -399,6 +406,8 @@ bool RISCVPassConfig::addRegAssignAndRewriteFast() {
bool RISCVPassConfig::addRegAssignAndRewriteOptimized() {
addPass(createRVVRegAllocPass(true));
addPass(createVirtRegRewriter(false));
+ if (EnableVSETVLIAfterRVVRegAlloc)
+ addPass(createRISCVInsertVSETVLIPass());
addPass(createRISCVCoalesceVSETVLIPass());
if (TM->getOptLevel() != CodeGenOptLevel::None &&
EnableRISCVDeadRegisterElimination)
@@ -547,10 +556,12 @@ void RISCVPassConfig::addPreRegAlloc() {
// Run RISCVInsertVSETVLI after PHI elimination. On O1 and above do it after
// register coalescing so needVSETVLIPHI doesn't need to look through COPYs.
- if (TM->getOptLevel() == CodeGenOptLevel::None)
- insertPass(&PHIEliminationID, &RISCVInsertVSETVLIID);
- else
- insertPass(&RegisterCoalescerID, &RISCVInsertVSETVLIID);
+ if (!EnableVSETVLIAfterRVVRegAlloc) {
+ if (TM->getOptLevel() == CodeGenOptLevel::None)
+ insertPass(&PHIEliminationID, &RISCVInsertVSETVLIID);
+ else
+ insertPass(&RegisterCoalescerID, &RISCVInsertVSETVLIID);
+ }
}
void RISCVPassConfig::addFastRegAlloc() {
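Note: with riscv-vsetvl-after-rvv-regalloc defaulting to true, RISCVInsertVSETVLI now runs after RVV register allocation in both the fast and optimized pipelines, and the old pre-RA placement is kept only behind the flag. For a side-by-side comparison one would presumably pass something like llc -mtriple=riscv64 -mattr=+v -riscv-vsetvl-after-rvv-regalloc=false (hypothetical invocation; only the option name comes from this patch).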
diff --git a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
index ad0158086044..3206c264f99d 100644
--- a/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVAsmPrinter.cpp
@@ -69,7 +69,8 @@ public:
void outputOpFunctionEnd();
void outputExtFuncDecls();
void outputExecutionModeFromMDNode(Register Reg, MDNode *Node,
- SPIRV::ExecutionMode::ExecutionMode EM);
+ SPIRV::ExecutionMode::ExecutionMode EM,
+ unsigned ExpectMDOps, int64_t DefVal);
void outputExecutionModeFromNumthreadsAttribute(
const Register &Reg, const Attribute &Attr,
SPIRV::ExecutionMode::ExecutionMode EM);
@@ -422,12 +423,19 @@ static void addOpsFromMDNode(MDNode *MDN, MCInst &Inst,
}
void SPIRVAsmPrinter::outputExecutionModeFromMDNode(
- Register Reg, MDNode *Node, SPIRV::ExecutionMode::ExecutionMode EM) {
+ Register Reg, MDNode *Node, SPIRV::ExecutionMode::ExecutionMode EM,
+ unsigned ExpectMDOps, int64_t DefVal) {
MCInst Inst;
Inst.setOpcode(SPIRV::OpExecutionMode);
Inst.addOperand(MCOperand::createReg(Reg));
Inst.addOperand(MCOperand::createImm(static_cast<unsigned>(EM)));
addOpsFromMDNode(Node, Inst, MAI);
+ // reqd_work_group_size and work_group_size_hint require 3 operands;
+ // if the metadata contains fewer operands, just add a default value.
+ unsigned NodeSz = Node->getNumOperands();
+ if (ExpectMDOps > 0 && NodeSz < ExpectMDOps)
+ for (unsigned i = NodeSz; i < ExpectMDOps; ++i)
+ Inst.addOperand(MCOperand::createImm(DefVal));
outputMCInst(Inst);
}
@@ -473,17 +481,17 @@ void SPIRVAsmPrinter::outputExecutionMode(const Module &M) {
Register FReg = MAI->getFuncReg(&F);
assert(FReg.isValid());
if (MDNode *Node = F.getMetadata("reqd_work_group_size"))
- outputExecutionModeFromMDNode(FReg, Node,
- SPIRV::ExecutionMode::LocalSize);
+ outputExecutionModeFromMDNode(FReg, Node, SPIRV::ExecutionMode::LocalSize,
+ 3, 1);
if (Attribute Attr = F.getFnAttribute("hlsl.numthreads"); Attr.isValid())
outputExecutionModeFromNumthreadsAttribute(
FReg, Attr, SPIRV::ExecutionMode::LocalSize);
if (MDNode *Node = F.getMetadata("work_group_size_hint"))
outputExecutionModeFromMDNode(FReg, Node,
- SPIRV::ExecutionMode::LocalSizeHint);
+ SPIRV::ExecutionMode::LocalSizeHint, 3, 1);
if (MDNode *Node = F.getMetadata("intel_reqd_sub_group_size"))
outputExecutionModeFromMDNode(FReg, Node,
- SPIRV::ExecutionMode::SubgroupSize);
+ SPIRV::ExecutionMode::SubgroupSize, 0, 0);
if (MDNode *Node = F.getMetadata("vec_type_hint")) {
MCInst Inst;
Inst.setOpcode(SPIRV::OpExecutionMode);
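Note: ExpectMDOps/DefVal exist because reqd_work_group_size and work_group_size_hint may carry fewer than three dimensions, while OpExecutionMode LocalSize/LocalSizeHint always takes three literals; intel_reqd_sub_group_size passes 0, so no padding happens there. A minimal sketch of the padding rule, using stand-in names that are not from the SPIR-V backend:

    #include <cstdint>
    #include <vector>

    // Pad a partially specified work-group size to the operand count that
    // OpExecutionMode LocalSize expects, filling missing dimensions with 1.
    std::vector<int64_t> padWorkGroupSize(std::vector<int64_t> Dims,
                                          unsigned ExpectMDOps = 3,
                                          int64_t DefVal = 1) {
      while (Dims.size() < ExpectMDOps)
        Dims.push_back(DefVal);
      return Dims; // e.g. {64} becomes {64, 1, 1}
    }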
diff --git a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
index 32df2403dfe5..a1a08c5c699b 100644
--- a/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
+++ b/llvm/lib/Target/SPIRV/SPIRVEmitIntrinsics.cpp
@@ -489,10 +489,6 @@ void SPIRVEmitIntrinsics::deduceOperandElementType(Instruction *I) {
Type *Ty = GR->findDeducedElementType(Op);
if (Ty == KnownElemTy)
continue;
- if (Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get()))
- setInsertPointSkippingPhis(B, User->getNextNode());
- else
- setInsertPointSkippingPhis(B, I);
Value *OpTyVal = Constant::getNullValue(KnownElemTy);
Type *OpTy = Op->getType();
if (!Ty) {
@@ -500,6 +496,8 @@ void SPIRVEmitIntrinsics::deduceOperandElementType(Instruction *I) {
// check if there is existing Intrinsic::spv_assign_ptr_type instruction
auto It = AssignPtrTypeInstr.find(Op);
if (It == AssignPtrTypeInstr.end()) {
+ Instruction *User = dyn_cast<Instruction>(Op->use_begin()->get());
+ setInsertPointSkippingPhis(B, User ? User->getNextNode() : I);
CallInst *CI =
buildIntrWithMD(Intrinsic::spv_assign_ptr_type, {OpTy}, OpTyVal, Op,
{B.getInt32(getPointerAddressSpace(OpTy))}, B);
@@ -511,6 +509,17 @@ void SPIRVEmitIntrinsics::deduceOperandElementType(Instruction *I) {
Ctx, MDNode::get(Ctx, ValueAsMetadata::getConstant(OpTyVal))));
}
} else {
+ if (auto *OpI = dyn_cast<Instruction>(Op)) {
+ // spv_ptrcast's argument Op denotes an instruction that generates
+ // a value, and we may use getInsertionPointAfterDef()
+ B.SetInsertPoint(*OpI->getInsertionPointAfterDef());
+ B.SetCurrentDebugLocation(OpI->getDebugLoc());
+ } else if (auto *OpA = dyn_cast<Argument>(Op)) {
+ B.SetInsertPointPastAllocas(OpA->getParent());
+ B.SetCurrentDebugLocation(DebugLoc());
+ } else {
+ B.SetInsertPoint(F->getEntryBlock().getFirstNonPHIOrDbgOrAlloca());
+ }
SmallVector<Type *, 2> Types = {OpTy, OpTy};
MetadataAsValue *VMD = MetadataAsValue::get(
Ctx, MDNode::get(Ctx, ValueAsMetadata::getConstant(OpTyVal)));
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 5d0846453685..24ee0c45b667 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -3292,7 +3292,7 @@ bool X86TargetLowering::hasAndNotCompare(SDValue Y) const {
if (VT != MVT::i32 && VT != MVT::i64)
return false;
- return !isa<ConstantSDNode>(Y);
+ return !isa<ConstantSDNode>(Y) || cast<ConstantSDNode>(Y)->isOpaque();
}
bool X86TargetLowering::hasAndNot(SDValue Y) const {
diff --git a/llvm/lib/Transforms/Coroutines/CoroElide.cpp b/llvm/lib/Transforms/Coroutines/CoroElide.cpp
index bb244489e4c2..74b5ccb7b9b7 100644
--- a/llvm/lib/Transforms/Coroutines/CoroElide.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroElide.cpp
@@ -464,13 +464,9 @@ bool CoroIdElider::attemptElide() {
return true;
}
-static bool declaresCoroElideIntrinsics(Module &M) {
- return coro::declaresIntrinsics(M, {"llvm.coro.id", "llvm.coro.id.async"});
-}
-
PreservedAnalyses CoroElidePass::run(Function &F, FunctionAnalysisManager &AM) {
auto &M = *F.getParent();
- if (!declaresCoroElideIntrinsics(M))
+ if (!coro::declaresIntrinsics(M, {"llvm.coro.id"}))
return PreservedAnalyses::all();
FunctionElideInfo FEI{&F};
diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index 08a4522e3fac..dd9e77a855ef 100644
--- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -19,6 +19,7 @@
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallString.h"
+#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/PtrUseVisitor.h"
#include "llvm/Analysis/StackLifetime.h"
#include "llvm/Config/llvm-config.h"
@@ -1440,17 +1441,22 @@ namespace {
struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
using Base = PtrUseVisitor<AllocaUseVisitor>;
AllocaUseVisitor(const DataLayout &DL, const DominatorTree &DT,
- const CoroBeginInst &CB, const SuspendCrossingInfo &Checker,
+ const coro::Shape &CoroShape,
+ const SuspendCrossingInfo &Checker,
bool ShouldUseLifetimeStartInfo)
- : PtrUseVisitor(DL), DT(DT), CoroBegin(CB), Checker(Checker),
- ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {}
+ : PtrUseVisitor(DL), DT(DT), CoroShape(CoroShape), Checker(Checker),
+ ShouldUseLifetimeStartInfo(ShouldUseLifetimeStartInfo) {
+ for (AnyCoroSuspendInst *SuspendInst : CoroShape.CoroSuspends)
+ CoroSuspendBBs.insert(SuspendInst->getParent());
+ }
void visit(Instruction &I) {
Users.insert(&I);
Base::visit(I);
// If the pointer is escaped prior to CoroBegin, we have to assume it would
// be written into before CoroBegin as well.
- if (PI.isEscaped() && !DT.dominates(&CoroBegin, PI.getEscapingInst())) {
+ if (PI.isEscaped() &&
+ !DT.dominates(CoroShape.CoroBegin, PI.getEscapingInst())) {
MayWriteBeforeCoroBegin = true;
}
}
@@ -1553,10 +1559,19 @@ struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
// When the lifetime markers refer to a subrange of the original alloca,
// ignore them to avoid misleading the analysis.
- if (II.getIntrinsicID() != Intrinsic::lifetime_start || !IsOffsetKnown ||
- !Offset.isZero())
+ if (!IsOffsetKnown || !Offset.isZero())
+ return Base::visitIntrinsicInst(II);
+ switch (II.getIntrinsicID()) {
+ default:
return Base::visitIntrinsicInst(II);
- LifetimeStarts.insert(&II);
+ case Intrinsic::lifetime_start:
+ LifetimeStarts.insert(&II);
+ LifetimeStartBBs.push_back(II.getParent());
+ break;
+ case Intrinsic::lifetime_end:
+ LifetimeEndBBs.insert(II.getParent());
+ break;
+ }
}
void visitCallBase(CallBase &CB) {
@@ -1586,7 +1601,7 @@ struct AllocaUseVisitor : PtrUseVisitor<AllocaUseVisitor> {
private:
const DominatorTree &DT;
- const CoroBeginInst &CoroBegin;
+ const coro::Shape &CoroShape;
const SuspendCrossingInfo &Checker;
// All aliases of the original AllocaInst, created before CoroBegin and used
// after CoroBegin. Each entry contains the instruction and the offset in the
@@ -1594,6 +1609,9 @@ private:
DenseMap<Instruction *, std::optional<APInt>> AliasOffetMap{};
SmallPtrSet<Instruction *, 4> Users{};
SmallPtrSet<IntrinsicInst *, 2> LifetimeStarts{};
+ SmallVector<BasicBlock *> LifetimeStartBBs{};
+ SmallPtrSet<BasicBlock *, 2> LifetimeEndBBs{};
+ SmallPtrSet<const BasicBlock *, 2> CoroSuspendBBs{};
bool MayWriteBeforeCoroBegin{false};
bool ShouldUseLifetimeStartInfo{true};
@@ -1605,10 +1623,19 @@ private:
// every basic block that uses the pointer to see if they cross suspension
// points. The uses cover both direct uses as well as indirect uses.
if (ShouldUseLifetimeStartInfo && !LifetimeStarts.empty()) {
- for (auto *I : Users)
- for (auto *S : LifetimeStarts)
- if (Checker.isDefinitionAcrossSuspend(*S, I))
- return true;
+ // If there is no explicit lifetime.end, then assume the address can
+ // cross suspension points.
+ if (LifetimeEndBBs.empty())
+ return true;
+
+ // If there is a path from a lifetime.start to a suspend without a
+ // corresponding lifetime.end, then the alloca's lifetime persists
+ // beyond that suspension point and the alloca must go on the frame.
+ llvm::SmallVector<BasicBlock *> Worklist(LifetimeStartBBs);
+ if (isManyPotentiallyReachableFromMany(Worklist, CoroSuspendBBs,
+ &LifetimeEndBBs, &DT))
+ return true;
+
// Addresses are guaranteed to be identical after every lifetime.start so
// we cannot use the local stack if the address escaped and there is a
// suspend point between lifetime markers. This should also cover the
@@ -1646,13 +1673,13 @@ private:
}
void handleMayWrite(const Instruction &I) {
- if (!DT.dominates(&CoroBegin, &I))
+ if (!DT.dominates(CoroShape.CoroBegin, &I))
MayWriteBeforeCoroBegin = true;
}
bool usedAfterCoroBegin(Instruction &I) {
for (auto &U : I.uses())
- if (DT.dominates(&CoroBegin, U))
+ if (DT.dominates(CoroShape.CoroBegin, U))
return true;
return false;
}
@@ -1661,7 +1688,7 @@ private:
// We track all aliases created prior to CoroBegin but used after.
// These aliases may need to be recreated after CoroBegin if the alloca
// need to live on the frame.
- if (DT.dominates(&CoroBegin, &I) || !usedAfterCoroBegin(I))
+ if (DT.dominates(CoroShape.CoroBegin, &I) || !usedAfterCoroBegin(I))
return;
if (!IsOffsetKnown) {
@@ -2830,8 +2857,7 @@ static void collectFrameAlloca(AllocaInst *AI, coro::Shape &Shape,
bool ShouldUseLifetimeStartInfo =
(Shape.ABI != coro::ABI::Async && Shape.ABI != coro::ABI::Retcon &&
Shape.ABI != coro::ABI::RetconOnce);
- AllocaUseVisitor Visitor{AI->getModule()->getDataLayout(), DT,
- *Shape.CoroBegin, Checker,
+ AllocaUseVisitor Visitor{AI->getModule()->getDataLayout(), DT, Shape, Checker,
ShouldUseLifetimeStartInfo};
Visitor.visitPtr(*AI);
if (!Visitor.getShouldLiveOnFrame())
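Note: the new reachability query replaces the per-use isDefinitionAcrossSuspend scan with a block-level test: an alloca has to live on the coroutine frame only if some path from a lifetime.start block reaches a suspend block without first passing a lifetime.end block. A minimal C++20 sketch of the two cases, assuming a trivial Task coroutine type (illustrative only, not from this patch):

    #include <coroutine>

    struct Task {
      struct promise_type {
        Task get_return_object() { return {}; }
        std::suspend_always initial_suspend() { return {}; }
        std::suspend_never final_suspend() noexcept { return {}; }
        void return_void() {}
        void unhandled_exception() {}
      };
    };

    int consume(int v) { return v; }

    Task example() {
      {
        int tmp = 42;   // lifetime.end is emitted at the end of this block,
        consume(tmp);   // before any suspend, so tmp may stay off the frame
      }
      int kept = 7;     // still live across the suspend below, so kept must
      co_await std::suspend_always{};  // be spilled to the coroutine frame
      consume(kept);
    }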
diff --git a/llvm/lib/Transforms/IPO/FunctionImport.cpp b/llvm/lib/Transforms/IPO/FunctionImport.cpp
index 68f9799616ae..a116fd653534 100644
--- a/llvm/lib/Transforms/IPO/FunctionImport.cpp
+++ b/llvm/lib/Transforms/IPO/FunctionImport.cpp
@@ -140,6 +140,17 @@ static cl::opt<bool>
ImportAllIndex("import-all-index",
cl::desc("Import all external functions in index."));
+/// This is a test-only option.
+/// If this option is enabled, the ThinLTO indexing step will import each
+/// function declaration as a fallback. In a real build this may increase RAM
+/// usage of the indexing step unnecessarily.
+/// TODO: Implement selective import (based on combined summary analysis) to
+/// ensure the imported function has a use case in the postlink pipeline.
+static cl::opt<bool> ImportDeclaration(
+ "import-declaration", cl::init(false), cl::Hidden,
+ cl::desc("If true, import function declaration as fallback if the function "
+ "definition is not imported."));
+
/// Pass a workload description file - an example of workload would be the
/// functions executed to satisfy a RPC request. A workload is defined by a root
/// function and the list of functions that are (frequently) needed to satisfy
@@ -245,8 +256,12 @@ static auto qualifyCalleeCandidates(
}
/// Given a list of possible callee implementation for a call site, select one
-/// that fits the \p Threshold. If none are found, the Reason will give the last
-/// reason for the failure (last, in the order of CalleeSummaryList entries).
+/// that fits the \p Threshold for function definition import. If none are
+/// found, the Reason will give the last reason for the failure (last, in the
+/// order of CalleeSummaryList entries). While looking for a callee definition,
+/// sets \p TooLargeOrNoInlineSummary to the last seen too-large or noinline
+/// candidate; other modules may want to know the function summary or
+/// declaration even if a definition is not needed.
///
/// FIXME: select "best" instead of first that fits. But what is "best"?
/// - The smallest: more likely to be inlined.
@@ -259,24 +274,32 @@ static const GlobalValueSummary *
selectCallee(const ModuleSummaryIndex &Index,
ArrayRef<std::unique_ptr<GlobalValueSummary>> CalleeSummaryList,
unsigned Threshold, StringRef CallerModulePath,
+ const GlobalValueSummary *&TooLargeOrNoInlineSummary,
FunctionImporter::ImportFailureReason &Reason) {
+ // Records the last summary with reason noinline or too-large.
+ TooLargeOrNoInlineSummary = nullptr;
auto QualifiedCandidates =
qualifyCalleeCandidates(Index, CalleeSummaryList, CallerModulePath);
for (auto QualifiedValue : QualifiedCandidates) {
Reason = QualifiedValue.first;
+ // Skip a summary if its import is not (proved to be) legal.
if (Reason != FunctionImporter::ImportFailureReason::None)
continue;
auto *Summary =
cast<FunctionSummary>(QualifiedValue.second->getBaseObject());
+ // Don't bother importing the definition if the chance of inlining it is
+ // not high enough (except under `--force-import-all`).
if ((Summary->instCount() > Threshold) && !Summary->fflags().AlwaysInline &&
!ForceImportAll) {
+ TooLargeOrNoInlineSummary = Summary;
Reason = FunctionImporter::ImportFailureReason::TooLarge;
continue;
}
- // Don't bother importing if we can't inline it anyway.
+ // Don't bother importing the definition if we can't inline it anyway.
if (Summary->fflags().NoInline && !ForceImportAll) {
+ TooLargeOrNoInlineSummary = Summary;
Reason = FunctionImporter::ImportFailureReason::NoInline;
continue;
}
@@ -358,17 +381,27 @@ class GlobalsImporter final {
if (!GVS || !Index.canImportGlobalVar(GVS, /* AnalyzeRefs */ true) ||
LocalNotInModule(GVS))
continue;
- auto ILI = ImportList[RefSummary->modulePath()].insert(VI.getGUID());
+
+ // If there isn't an entry for GUID, insert <GUID, Definition> pair.
+ // Otherwise, definition should take precedence over declaration.
+ auto [Iter, Inserted] =
+ ImportList[RefSummary->modulePath()].try_emplace(
+ VI.getGUID(), GlobalValueSummary::Definition);
// Only update stat and exports if we haven't already imported this
// variable.
- if (!ILI.second)
+ if (!Inserted) {
+ // Set the value to 'std::min(existing-value, new-value)' to make
+ // sure a definition takes precedence over a declaration.
+ Iter->second = std::min(GlobalValueSummary::Definition, Iter->second);
break;
+ }
NumImportedGlobalVarsThinLink++;
// Any references made by this variable will be marked exported
// later, in ComputeCrossModuleImport, after import decisions are
// complete, which is more efficient than adding them here.
if (ExportLists)
- (*ExportLists)[RefSummary->modulePath()].insert(VI);
+ (*ExportLists)[RefSummary->modulePath()][VI] =
+ GlobalValueSummary::Definition;
// If variable is not writeonly we attempt to recursively analyze
// its references in order to import referenced constants.
@@ -545,10 +578,11 @@ class WorkloadImportsManager : public ModuleImportsManager {
LLVM_DEBUG(dbgs() << "[Workload][Including]" << VI.name() << " from "
<< ExportingModule << " : "
<< Function::getGUID(VI.name()) << "\n");
- ImportList[ExportingModule].insert(VI.getGUID());
+ ImportList[ExportingModule][VI.getGUID()] =
+ GlobalValueSummary::Definition;
GVI.onImportingSummary(*GVS);
if (ExportLists)
- (*ExportLists)[ExportingModule].insert(VI);
+ (*ExportLists)[ExportingModule][VI] = GlobalValueSummary::Definition;
}
LLVM_DEBUG(dbgs() << "[Workload] Done\n");
}
@@ -769,9 +803,28 @@ static void computeImportForFunction(
}
FunctionImporter::ImportFailureReason Reason{};
- CalleeSummary = selectCallee(Index, VI.getSummaryList(), NewThreshold,
- Summary.modulePath(), Reason);
+
+ // `SummaryForDeclImport` is a summary eligible for declaration import.
+ const GlobalValueSummary *SummaryForDeclImport = nullptr;
+ CalleeSummary =
+ selectCallee(Index, VI.getSummaryList(), NewThreshold,
+ Summary.modulePath(), SummaryForDeclImport, Reason);
if (!CalleeSummary) {
+ // There isn't a callee for definition import but one for declaration
+ // import.
+ if (ImportDeclaration && SummaryForDeclImport) {
+ StringRef DeclSourceModule = SummaryForDeclImport->modulePath();
+
+ // Since definition takes precedence over declaration for the same VI,
+ // try to emplace the <VI, declaration> pair without checking the insert
+ // result. If the insert doesn't happen, there must already be an entry
+ // keyed by VI.
+ if (ExportLists)
+ (*ExportLists)[DeclSourceModule].try_emplace(
+ VI, GlobalValueSummary::Declaration);
+ ImportList[DeclSourceModule].try_emplace(
+ VI.getGUID(), GlobalValueSummary::Declaration);
+ }
// Update with new larger threshold if this was a retry (otherwise
// we would have already inserted with NewThreshold above). Also
// update failure info if requested.
@@ -816,11 +869,15 @@ static void computeImportForFunction(
"selectCallee() didn't honor the threshold");
auto ExportModulePath = ResolvedCalleeSummary->modulePath();
- auto ILI = ImportList[ExportModulePath].insert(VI.getGUID());
+
+ // Try to emplace the definition entry, and update stats based on the
+ // insertion status.
+ auto [Iter, Inserted] = ImportList[ExportModulePath].try_emplace(
+ VI.getGUID(), GlobalValueSummary::Definition);
+
// We previously decided to import this GUID definition if it was already
// inserted in the set of imports from the exporting module.
- bool PreviouslyImported = !ILI.second;
- if (!PreviouslyImported) {
+ if (Inserted || Iter->second == GlobalValueSummary::Declaration) {
NumImportedFunctionsThinLink++;
if (IsHotCallsite)
NumImportedHotFunctionsThinLink++;
@@ -828,11 +885,14 @@ static void computeImportForFunction(
NumImportedCriticalFunctionsThinLink++;
}
+ if (Iter->second == GlobalValueSummary::Declaration)
+ Iter->second = GlobalValueSummary::Definition;
+
// Any calls/references made by this function will be marked exported
// later, in ComputeCrossModuleImport, after import decisions are
// complete, which is more efficient than adding them here.
if (ExportLists)
- (*ExportLists)[ExportModulePath].insert(VI);
+ (*ExportLists)[ExportModulePath][VI] = GlobalValueSummary::Definition;
}
auto GetAdjustedThreshold = [](unsigned Threshold, bool IsHotCallsite) {
@@ -939,12 +999,20 @@ static bool isGlobalVarSummary(const ModuleSummaryIndex &Index,
}
template <class T>
-static unsigned numGlobalVarSummaries(const ModuleSummaryIndex &Index,
- T &Cont) {
+static unsigned numGlobalVarSummaries(const ModuleSummaryIndex &Index, T &Cont,
+ unsigned &DefinedGVS,
+ unsigned &DefinedFS) {
unsigned NumGVS = 0;
- for (auto &V : Cont)
- if (isGlobalVarSummary(Index, V))
+ DefinedGVS = 0;
+ DefinedFS = 0;
+ for (auto &[GUID, Type] : Cont) {
+ if (isGlobalVarSummary(Index, GUID)) {
+ if (Type == GlobalValueSummary::Definition)
+ ++DefinedGVS;
++NumGVS;
+ } else if (Type == GlobalValueSummary::Definition)
+ ++DefinedFS;
+ }
return NumGVS;
}
#endif
@@ -954,13 +1022,12 @@ static bool checkVariableImport(
const ModuleSummaryIndex &Index,
DenseMap<StringRef, FunctionImporter::ImportMapTy> &ImportLists,
DenseMap<StringRef, FunctionImporter::ExportSetTy> &ExportLists) {
-
DenseSet<GlobalValue::GUID> FlattenedImports;
for (auto &ImportPerModule : ImportLists)
for (auto &ExportPerModule : ImportPerModule.second)
- FlattenedImports.insert(ExportPerModule.second.begin(),
- ExportPerModule.second.end());
+ for (auto &[GUID, Type] : ExportPerModule.second)
+ FlattenedImports.insert(GUID);
// Checks that all GUIDs of read/writeonly vars we see in export lists
// are also in the import lists. Otherwise we may face linker undefs,
@@ -979,7 +1046,7 @@ static bool checkVariableImport(
};
for (auto &ExportPerModule : ExportLists)
- for (auto &VI : ExportPerModule.second)
+ for (auto &[VI, Unused] : ExportPerModule.second)
if (!FlattenedImports.count(VI.getGUID()) &&
IsReadOrWriteOnlyVarNeedingImporting(ExportPerModule.first, VI))
return false;
@@ -1015,7 +1082,11 @@ void llvm::ComputeCrossModuleImport(
FunctionImporter::ExportSetTy NewExports;
const auto &DefinedGVSummaries =
ModuleToDefinedGVSummaries.lookup(ELI.first);
- for (auto &EI : ELI.second) {
+ for (auto &[EI, Type] : ELI.second) {
+ // If a variable is exported as a declaration, its 'refs' and 'calls' are
+ // not further exported.
+ if (Type == GlobalValueSummary::Declaration)
+ continue;
// Find the copy defined in the exporting module so that we can mark the
// values it references in that specific definition as exported.
// Below we will add all references and called values, without regard to
@@ -1034,22 +1105,31 @@ void llvm::ComputeCrossModuleImport(
// we convert such variables initializers to "zeroinitializer".
// See processGlobalForThinLTO.
if (!Index.isWriteOnly(GVS))
- for (const auto &VI : GVS->refs())
- NewExports.insert(VI);
+ for (const auto &VI : GVS->refs()) {
+ // Try to emplace the declaration entry. If a definition entry
+ // already exists for key `VI`, this is a no-op.
+ NewExports.try_emplace(VI, GlobalValueSummary::Declaration);
+ }
} else {
auto *FS = cast<FunctionSummary>(S);
- for (const auto &Edge : FS->calls())
- NewExports.insert(Edge.first);
- for (const auto &Ref : FS->refs())
- NewExports.insert(Ref);
+ for (const auto &Edge : FS->calls()) {
+ // Try to emplace the declaration entry. If a definition entry
+ // already exists for key `VI`, this is a no-op.
+ NewExports.try_emplace(Edge.first, GlobalValueSummary::Declaration);
+ }
+ for (const auto &Ref : FS->refs()) {
+ // Try to emplace the declaration entry. If a definition entry
+ // already exists for key `VI`, this is a no-op.
+ NewExports.try_emplace(Ref, GlobalValueSummary::Declaration);
+ }
}
}
- // Prune list computed above to only include values defined in the exporting
- // module. We do this after the above insertion since we may hit the same
- // ref/call target multiple times in above loop, and it is more efficient to
- // avoid a set lookup each time.
+ // Prune list computed above to only include values defined in the
+ // exporting module. We do this after the above insertion since we may hit
+ // the same ref/call target multiple times in above loop, and it is more
+ // efficient to avoid a set lookup each time.
for (auto EI = NewExports.begin(); EI != NewExports.end();) {
- if (!DefinedGVSummaries.count(EI->getGUID()))
+ if (!DefinedGVSummaries.count(EI->first.getGUID()))
NewExports.erase(EI++);
else
++EI;
@@ -1064,18 +1144,29 @@ void llvm::ComputeCrossModuleImport(
for (auto &ModuleImports : ImportLists) {
auto ModName = ModuleImports.first;
auto &Exports = ExportLists[ModName];
- unsigned NumGVS = numGlobalVarSummaries(Index, Exports);
- LLVM_DEBUG(dbgs() << "* Module " << ModName << " exports "
- << Exports.size() - NumGVS << " functions and " << NumGVS
- << " vars. Imports from " << ModuleImports.second.size()
- << " modules.\n");
+ unsigned DefinedGVS = 0, DefinedFS = 0;
+ unsigned NumGVS =
+ numGlobalVarSummaries(Index, Exports, DefinedGVS, DefinedFS);
+ LLVM_DEBUG(dbgs() << "* Module " << ModName << " exports " << DefinedFS
+ << " functions as definitions, "
+ << Exports.size() - NumGVS - DefinedFS
+ << " functions as declarations, " << DefinedGVS
+ << " var definitions and " << NumGVS - DefinedGVS
+ << " var declarations. Imports from "
+ << ModuleImports.second.size() << " modules.\n");
for (auto &Src : ModuleImports.second) {
auto SrcModName = Src.first;
- unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second);
- LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
- << " functions imported from " << SrcModName << "\n");
- LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod
- << " global vars imported from " << SrcModName << "\n");
+ unsigned DefinedGVS = 0, DefinedFS = 0;
+ unsigned NumGVSPerMod =
+ numGlobalVarSummaries(Index, Src.second, DefinedGVS, DefinedFS);
+ LLVM_DEBUG(dbgs() << " - " << DefinedFS << " function definitions and "
+ << Src.second.size() - NumGVSPerMod - DefinedFS
+ << " function declarations imported from " << SrcModName
+ << "\n");
+ LLVM_DEBUG(dbgs() << " - " << DefinedGVS << " global var definitions and "
+ << NumGVSPerMod - DefinedGVS
+ << " global var declarations imported from "
+ << SrcModName << "\n");
}
}
#endif
@@ -1089,11 +1180,17 @@ static void dumpImportListForModule(const ModuleSummaryIndex &Index,
<< ImportList.size() << " modules.\n");
for (auto &Src : ImportList) {
auto SrcModName = Src.first;
- unsigned NumGVSPerMod = numGlobalVarSummaries(Index, Src.second);
- LLVM_DEBUG(dbgs() << " - " << Src.second.size() - NumGVSPerMod
- << " functions imported from " << SrcModName << "\n");
- LLVM_DEBUG(dbgs() << " - " << NumGVSPerMod << " vars imported from "
- << SrcModName << "\n");
+ unsigned DefinedGVS = 0, DefinedFS = 0;
+ unsigned NumGVSPerMod =
+ numGlobalVarSummaries(Index, Src.second, DefinedGVS, DefinedFS);
+ LLVM_DEBUG(dbgs() << " - " << DefinedFS << " function definitions and "
+ << Src.second.size() - DefinedFS - NumGVSPerMod
+ << " function declarations imported from " << SrcModName
+ << "\n");
+ LLVM_DEBUG(dbgs() << " - " << DefinedGVS << " var definitions and "
+ << NumGVSPerMod - DefinedGVS
+ << " var declarations imported from " << SrcModName
+ << "\n");
}
}
#endif
@@ -1149,7 +1246,13 @@ static void ComputeCrossModuleImportForModuleFromIndexForTest(
if (Summary->modulePath() == ModulePath)
continue;
// Add an entry to provoke importing by thinBackend.
- ImportList[Summary->modulePath()].insert(GUID);
+ auto [Iter, Inserted] = ImportList[Summary->modulePath()].try_emplace(
+ GUID, Summary->importType());
+ if (!Inserted) {
+ // Use 'std::min' to make sure definition (with enum value 0) takes
+ // precedence over declaration (with enum value 1).
+ Iter->second = std::min(Iter->second, Summary->importType());
+ }
}
#ifndef NDEBUG
dumpImportListForModule(Index, ModulePath, ImportList);
@@ -1339,13 +1442,17 @@ void llvm::gatherImportedSummariesForModule(
// Include summaries for imports.
for (const auto &ILI : ImportList) {
auto &SummariesForIndex = ModuleToSummariesForIndex[std::string(ILI.first)];
+
const auto &DefinedGVSummaries =
ModuleToDefinedGVSummaries.lookup(ILI.first);
- for (const auto &GI : ILI.second) {
- const auto &DS = DefinedGVSummaries.find(GI);
+ for (const auto &[GUID, Type] : ILI.second) {
+ const auto &DS = DefinedGVSummaries.find(GUID);
assert(DS != DefinedGVSummaries.end() &&
"Expected a defined summary for imported global value");
- SummariesForIndex[GI] = DS->second;
+ if (Type == GlobalValueSummary::Declaration)
+ continue;
+
+ SummariesForIndex[GUID] = DS->second;
}
}
}
@@ -1617,6 +1724,16 @@ Expected<bool> FunctionImporter::importFunctions(
for (const auto &FunctionsToImportPerModule : ImportList) {
ModuleNameOrderedList.insert(FunctionsToImportPerModule.first);
}
+
+ auto getImportType = [&](const FunctionsToImportTy &GUIDToImportType,
+ GlobalValue::GUID GUID)
+ -> std::optional<GlobalValueSummary::ImportKind> {
+ auto Iter = GUIDToImportType.find(GUID);
+ if (Iter == GUIDToImportType.end())
+ return std::nullopt;
+ return Iter->second;
+ };
+
for (const auto &Name : ModuleNameOrderedList) {
// Get the module for the import
const auto &FunctionsToImportPerModule = ImportList.find(Name);
@@ -1634,17 +1751,27 @@ Expected<bool> FunctionImporter::importFunctions(
return std::move(Err);
auto &ImportGUIDs = FunctionsToImportPerModule->second;
+
// Find the globals to import
SetVector<GlobalValue *> GlobalsToImport;
for (Function &F : *SrcModule) {
if (!F.hasName())
continue;
auto GUID = F.getGUID();
- auto Import = ImportGUIDs.count(GUID);
- LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing function "
+ auto MaybeImportType = getImportType(ImportGUIDs, GUID);
+
+ bool ImportDefinition =
+ (MaybeImportType &&
+ (*MaybeImportType == GlobalValueSummary::Definition));
+
+ LLVM_DEBUG(dbgs() << (MaybeImportType ? "Is" : "Not")
+ << " importing function"
+ << (ImportDefinition
+ ? " definition "
+ : (MaybeImportType ? " declaration " : " "))
<< GUID << " " << F.getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
- if (Import) {
+ if (ImportDefinition) {
if (Error Err = F.materialize())
return std::move(Err);
// MemProf should match function's definition and summary,
@@ -1670,11 +1797,20 @@ Expected<bool> FunctionImporter::importFunctions(
if (!GV.hasName())
continue;
auto GUID = GV.getGUID();
- auto Import = ImportGUIDs.count(GUID);
- LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing global "
+ auto MaybeImportType = getImportType(ImportGUIDs, GUID);
+
+ bool ImportDefinition =
+ (MaybeImportType &&
+ (*MaybeImportType == GlobalValueSummary::Definition));
+
+ LLVM_DEBUG(dbgs() << (MaybeImportType ? "Is" : "Not")
+ << " importing global"
+ << (ImportDefinition
+ ? " definition "
+ : (MaybeImportType ? " declaration " : " "))
<< GUID << " " << GV.getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
- if (Import) {
+ if (ImportDefinition) {
if (Error Err = GV.materialize())
return std::move(Err);
ImportedGVCount += GlobalsToImport.insert(&GV);
@@ -1684,11 +1820,20 @@ Expected<bool> FunctionImporter::importFunctions(
if (!GA.hasName() || isa<GlobalIFunc>(GA.getAliaseeObject()))
continue;
auto GUID = GA.getGUID();
- auto Import = ImportGUIDs.count(GUID);
- LLVM_DEBUG(dbgs() << (Import ? "Is" : "Not") << " importing alias "
+ auto MaybeImportType = getImportType(ImportGUIDs, GUID);
+
+ bool ImportDefinition =
+ (MaybeImportType &&
+ (*MaybeImportType == GlobalValueSummary::Definition));
+
+ LLVM_DEBUG(dbgs() << (MaybeImportType ? "Is" : "Not")
+ << " importing alias"
+ << (ImportDefinition
+ ? " definition "
+ : (MaybeImportType ? " declaration " : " "))
<< GUID << " " << GA.getName() << " from "
<< SrcModule->getSourceFileName() << "\n");
- if (Import) {
+ if (ImportDefinition) {
if (Error Err = GA.materialize())
return std::move(Err);
// Import alias as a copy of its aliasee.
@@ -1754,6 +1899,7 @@ Expected<bool> FunctionImporter::importFunctions(
NumImportedFunctions += (ImportedCount - ImportedGVCount);
NumImportedGlobalVars += ImportedGVCount;
+ // TODO: Print counters for definitions and declarations in the debugging log.
LLVM_DEBUG(dbgs() << "Imported " << ImportedCount - ImportedGVCount
<< " functions for Module "
<< DestModule.getModuleIdentifier() << "\n");
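Note: the recurring idiom in this file is that import and export lists now map a GUID (or ValueInfo) to an ImportKind instead of being plain sets, with Definition taking precedence over Declaration; because Definition has the smaller enum value, precedence reduces to try_emplace plus std::min. A self-contained sketch of that merge rule, using stand-in types (GUID, ImportKind) rather than the real LLVM ones:

    #include <algorithm>
    #include <cstdint>
    #include <map>

    using GUID = uint64_t;
    enum ImportKind { Definition = 0, Declaration = 1 };

    // Record an import decision for one source module: insert if absent,
    // otherwise keep the stronger kind (a definition always wins over a
    // previously recorded declaration for the same GUID).
    void recordImport(std::map<GUID, ImportKind> &ImportList, GUID G, ImportKind K) {
      auto [It, Inserted] = ImportList.try_emplace(G, K);
      if (!Inserted)
        It->second = std::min(It->second, K);
    }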
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
index cdec8c8c7c80..b6f339da31f7 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -2424,17 +2424,6 @@ Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
break;
}
case Intrinsic::fmuladd: {
- // Canonicalize fast fmuladd to the separate fmul + fadd.
- if (II->isFast()) {
- BuilderTy::FastMathFlagGuard Guard(Builder);
- Builder.setFastMathFlags(II->getFastMathFlags());
- Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
- II->getArgOperand(1));
- Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
- Add->takeName(II);
- return replaceInstUsesWith(*II, Add);
- }
-
// Try to simplify the underlying FMul.
if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
II->getFastMathFlags(),
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
index 4351a55ea1d3..832f89ed0b64 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineLoadStoreAlloca.cpp
@@ -332,7 +332,7 @@ bool PointerReplacer::collectUsersRecursive(Instruction &I) {
Worklist.insert(SI);
if (!collectUsersRecursive(*SI))
return false;
- } else if (isa<GetElementPtrInst, BitCastInst>(Inst)) {
+ } else if (isa<GetElementPtrInst>(Inst)) {
Worklist.insert(Inst);
if (!collectUsersRecursive(*Inst))
return false;
@@ -393,15 +393,6 @@ void PointerReplacer::replace(Instruction *I) {
NewI->takeName(GEP);
NewI->setIsInBounds(GEP->isInBounds());
WorkMap[GEP] = NewI;
- } else if (auto *BC = dyn_cast<BitCastInst>(I)) {
- auto *V = getReplacement(BC->getOperand(0));
- assert(V && "Operand not replaced");
- auto *NewT = PointerType::get(BC->getType()->getContext(),
- V->getType()->getPointerAddressSpace());
- auto *NewI = new BitCastInst(V, NewT);
- IC.InsertNewInstWith(NewI, BC->getIterator());
- NewI->takeName(BC);
- WorkMap[BC] = NewI;
} else if (auto *SI = dyn_cast<SelectInst>(I)) {
auto *NewSI = SelectInst::Create(
SI->getCondition(), getReplacement(SI->getTrueValue()),
diff --git a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
index 99f1f8eb34bb..244f099f0654 100644
--- a/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
+++ b/llvm/lib/Transforms/InstCombine/InstCombineVectorOps.cpp
@@ -619,7 +619,7 @@ static bool collectSingleShuffleElements(Value *V, Value *LHS, Value *RHS,
"Invalid CollectSingleShuffleElements");
unsigned NumElts = cast<FixedVectorType>(V->getType())->getNumElements();
- if (match(V, m_Undef())) {
+ if (match(V, m_Poison())) {
Mask.assign(NumElts, -1);
return true;
}
@@ -1319,7 +1319,7 @@ static Instruction *foldInsEltIntoSplat(InsertElementInst &InsElt) {
static Instruction *foldInsEltIntoIdentityShuffle(InsertElementInst &InsElt) {
// Check if the vector operand of this insert is an identity shuffle.
auto *Shuf = dyn_cast<ShuffleVectorInst>(InsElt.getOperand(0));
- if (!Shuf || !match(Shuf->getOperand(1), m_Undef()) ||
+ if (!Shuf || !match(Shuf->getOperand(1), m_Poison()) ||
!(Shuf->isIdentityWithExtract() || Shuf->isIdentityWithPadding()))
return nullptr;
@@ -2214,19 +2214,19 @@ static Instruction *canonicalizeInsertSplat(ShuffleVectorInst &Shuf,
uint64_t IndexC;
// Match a shuffle that is a splat to a non-zero element.
- if (!match(Op0, m_OneUse(m_InsertElt(m_Undef(), m_Value(X),
+ if (!match(Op0, m_OneUse(m_InsertElt(m_Poison(), m_Value(X),
m_ConstantInt(IndexC)))) ||
- !match(Op1, m_Undef()) || match(Mask, m_ZeroMask()) || IndexC == 0)
+ !match(Op1, m_Poison()) || match(Mask, m_ZeroMask()) || IndexC == 0)
return nullptr;
// Insert into element 0 of a poison vector.
PoisonValue *PoisonVec = PoisonValue::get(Shuf.getType());
Value *NewIns = Builder.CreateInsertElement(PoisonVec, X, (uint64_t)0);
- // Splat from element 0. Any mask element that is undefined remains undefined.
+ // Splat from element 0. Any mask element that is poison remains poison.
// For example:
- // shuf (inselt undef, X, 2), _, <2,2,undef>
- // --> shuf (inselt undef, X, 0), poison, <0,0,undef>
+ // shuf (inselt poison, X, 2), _, <2,2,undef>
+ // --> shuf (inselt poison, X, 0), poison, <0,0,undef>
unsigned NumMaskElts =
cast<FixedVectorType>(Shuf.getType())->getNumElements();
SmallVector<int, 16> NewMask(NumMaskElts, 0);
@@ -2383,7 +2383,7 @@ static Instruction *foldTruncShuffle(ShuffleVectorInst &Shuf,
Type *DestType = Shuf.getType();
Value *X;
if (!match(Shuf.getOperand(0), m_BitCast(m_Value(X))) ||
- !match(Shuf.getOperand(1), m_Undef()) || !DestType->isIntOrIntVectorTy())
+ !match(Shuf.getOperand(1), m_Poison()) || !DestType->isIntOrIntVectorTy())
return nullptr;
// The source type must have the same number of elements as the shuffle,
@@ -2416,13 +2416,13 @@ static Instruction *foldTruncShuffle(ShuffleVectorInst &Shuf,
}
/// Match a shuffle-select-shuffle pattern where the shuffles are widening and
-/// narrowing (concatenating with undef and extracting back to the original
+/// narrowing (concatenating with poison and extracting back to the original
+/// narrowing (concatenating with poison and extracting back to the original
/// length). This allows replacing the wide select with a narrow select.
static Instruction *narrowVectorSelect(ShuffleVectorInst &Shuf,
InstCombiner::BuilderTy &Builder) {
// This must be a narrowing identity shuffle. It extracts the 1st N elements
// of the 1st vector operand of a shuffle.
- if (!match(Shuf.getOperand(1), m_Undef()) || !Shuf.isIdentityWithExtract())
+ if (!match(Shuf.getOperand(1), m_Poison()) || !Shuf.isIdentityWithExtract())
return nullptr;
// The vector being shuffled must be a vector select that we can eliminate.
@@ -2432,19 +2432,20 @@ static Instruction *narrowVectorSelect(ShuffleVectorInst &Shuf,
m_OneUse(m_Select(m_Value(Cond), m_Value(X), m_Value(Y)))))
return nullptr;
- // We need a narrow condition value. It must be extended with undef elements
+ // We need a narrow condition value. It must be extended with poison elements
// and have the same number of elements as this shuffle.
unsigned NarrowNumElts =
cast<FixedVectorType>(Shuf.getType())->getNumElements();
Value *NarrowCond;
- if (!match(Cond, m_OneUse(m_Shuffle(m_Value(NarrowCond), m_Undef()))) ||
+ if (!match(Cond, m_OneUse(m_Shuffle(m_Value(NarrowCond), m_Poison()))) ||
cast<FixedVectorType>(NarrowCond->getType())->getNumElements() !=
NarrowNumElts ||
!cast<ShuffleVectorInst>(Cond)->isIdentityWithPadding())
return nullptr;
- // shuf (sel (shuf NarrowCond, undef, WideMask), X, Y), undef, NarrowMask) -->
- // sel NarrowCond, (shuf X, undef, NarrowMask), (shuf Y, undef, NarrowMask)
+ // shuf (sel (shuf NarrowCond, poison, WideMask), X, Y), poison, NarrowMask)
+ // -->
+ // sel NarrowCond, (shuf X, poison, NarrowMask), (shuf Y, poison, NarrowMask)
Value *NarrowX = Builder.CreateShuffleVector(X, Shuf.getShuffleMask());
Value *NarrowY = Builder.CreateShuffleVector(Y, Shuf.getShuffleMask());
return SelectInst::Create(NarrowCond, NarrowX, NarrowY);
@@ -2462,7 +2463,7 @@ static Instruction *foldShuffleOfUnaryOps(ShuffleVectorInst &Shuf,
// Match 1-input (unary) shuffle.
// shuffle (fneg/fabs X), Mask --> fneg/fabs (shuffle X, Mask)
- if (S0->hasOneUse() && match(Shuf.getOperand(1), m_Undef())) {
+ if (S0->hasOneUse() && match(Shuf.getOperand(1), m_Poison())) {
Value *NewShuf = Builder.CreateShuffleVector(X, Shuf.getShuffleMask());
if (IsFNeg)
return UnaryOperator::CreateFNegFMF(NewShuf, S0);
@@ -2549,7 +2550,7 @@ static Instruction *foldCastShuffle(ShuffleVectorInst &Shuf,
/// Try to fold an extract subvector operation.
static Instruction *foldIdentityExtractShuffle(ShuffleVectorInst &Shuf) {
Value *Op0 = Shuf.getOperand(0), *Op1 = Shuf.getOperand(1);
- if (!Shuf.isIdentityWithExtract() || !match(Op1, m_Undef()))
+ if (!Shuf.isIdentityWithExtract() || !match(Op1, m_Poison()))
return nullptr;
// Check if we are extracting all bits of an inserted scalar:
@@ -2578,10 +2579,10 @@ static Instruction *foldIdentityExtractShuffle(ShuffleVectorInst &Shuf) {
// not allow arbitrary shuffle mask creation as a target-independent transform
// (because we can't guarantee that will lower efficiently).
//
- // If the extracting shuffle has an undef mask element, it transfers to the
+ // If the extracting shuffle has a poison mask element, it transfers to the
// new shuffle mask. Otherwise, copy the original mask element. Example:
- // shuf (shuf X, Y, <C0, C1, C2, undef, C4>), undef, <0, undef, 2, 3> -->
- // shuf X, Y, <C0, undef, C2, undef>
+ // shuf (shuf X, Y, <C0, C1, C2, poison, C4>), poison, <0, poison, 2, 3> -->
+ // shuf X, Y, <C0, poison, C2, poison>
unsigned NumElts = cast<FixedVectorType>(Shuf.getType())->getNumElements();
SmallVector<int, 16> NewMask(NumElts);
assert(NumElts < Mask.size() &&
@@ -2755,17 +2756,17 @@ static Instruction *foldIdentityPaddedShuffles(ShuffleVectorInst &Shuf) {
// BinOp's operands are the result of a first element splat can be simplified to
// splatting the first element of the result of the BinOp
Instruction *InstCombinerImpl::simplifyBinOpSplats(ShuffleVectorInst &SVI) {
- if (!match(SVI.getOperand(1), m_Undef()) ||
+ if (!match(SVI.getOperand(1), m_Poison()) ||
!match(SVI.getShuffleMask(), m_ZeroMask()) ||
!SVI.getOperand(0)->hasOneUse())
return nullptr;
Value *Op0 = SVI.getOperand(0);
Value *X, *Y;
- if (!match(Op0, m_BinOp(m_Shuffle(m_Value(X), m_Undef(), m_ZeroMask()),
+ if (!match(Op0, m_BinOp(m_Shuffle(m_Value(X), m_Poison(), m_ZeroMask()),
m_Value(Y))) &&
!match(Op0, m_BinOp(m_Value(X),
- m_Shuffle(m_Value(Y), m_Undef(), m_ZeroMask()))))
+ m_Shuffle(m_Value(Y), m_Poison(), m_ZeroMask()))))
return nullptr;
if (X->getType() != Y->getType())
return nullptr;
@@ -2901,7 +2902,7 @@ Instruction *InstCombinerImpl::visitShuffleVectorInst(ShuffleVectorInst &SVI) {
if (Instruction *I = foldIdentityPaddedShuffles(SVI))
return I;
- if (match(RHS, m_Undef()) && canEvaluateShuffled(LHS, Mask)) {
+ if (match(RHS, m_Poison()) && canEvaluateShuffled(LHS, Mask)) {
Value *V = evaluateInDifferentElementOrder(LHS, Mask, Builder);
return replaceInstUsesWith(SVI, V);
}
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 8d39217992c7..2aa21759d56e 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -1589,6 +1589,14 @@ void HWAddressSanitizer::sanitizeFunction(Function &F,
assert(!ShadowBase);
+ // Remove memory attributes that are about to become invalid.
+ // HWASan checks read from shadow memory, which invalidates memory(argmem: *).
+ // Short granule checks on function arguments read from the argument memory
+ // (last byte of the granule), which invalidates writeonly.
+ F.removeFnAttr(llvm::Attribute::Memory);
+ for (auto &A : F.args())
+ A.removeAttr(llvm::Attribute::WriteOnly);
+
BasicBlock::iterator InsertPt = F.getEntryBlock().begin();
IRBuilder<> EntryIRB(&F.getEntryBlock(), InsertPt);
emitPrologue(EntryIRB,
diff --git a/llvm/lib/Transforms/Scalar/DivRemPairs.cpp b/llvm/lib/Transforms/Scalar/DivRemPairs.cpp
index 45f36a36b5dd..f7ada9fb8eb8 100644
--- a/llvm/lib/Transforms/Scalar/DivRemPairs.cpp
+++ b/llvm/lib/Transforms/Scalar/DivRemPairs.cpp
@@ -381,8 +381,7 @@ static bool optimizeDivRem(Function &F, const TargetTransformInfo &TTI,
// %mul = mul %div, 1 // %mul = undef
// %rem = sub %x, %mul // %rem = undef - undef = undef
// If X is not frozen, %rem becomes undef after transformation.
- // TODO: We need a undef-specific checking function in ValueTracking
- if (!isGuaranteedNotToBeUndefOrPoison(X, nullptr, DivInst, &DT)) {
+ if (!isGuaranteedNotToBeUndef(X, nullptr, DivInst, &DT)) {
auto *FrX =
new FreezeInst(X, X->getName() + ".frozen", DivInst->getIterator());
DivInst->setOperand(0, FrX);
@@ -390,7 +389,7 @@ static bool optimizeDivRem(Function &F, const TargetTransformInfo &TTI,
}
// Same for Y. If X = 1 and Y = (undef | 1), %rem in src is either 1 or 0,
// but %rem in tgt can be one of many integer values.
- if (!isGuaranteedNotToBeUndefOrPoison(Y, nullptr, DivInst, &DT)) {
+ if (!isGuaranteedNotToBeUndef(Y, nullptr, DivInst, &DT)) {
auto *FrY =
new FreezeInst(Y, Y->getName() + ".frozen", DivInst->getIterator());
DivInst->setOperand(1, FrY);
diff --git a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 7ef5dceffec0..8fe3780bcf1b 100644
--- a/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/llvm/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -983,10 +983,8 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
append_range(srcUseList, U->users());
continue;
}
- if (const auto *G = dyn_cast<GetElementPtrInst>(U)) {
- if (!G->hasAllZeroIndices())
- return false;
-
+ if (const auto *G = dyn_cast<GetElementPtrInst>(U);
+ G && G->hasAllZeroIndices()) {
append_range(srcUseList, U->users());
continue;
}
@@ -994,8 +992,10 @@ bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpyLoad,
if (IT->isLifetimeStartOrEnd())
continue;
- if (U != C && U != cpyLoad)
+ if (U != C && U != cpyLoad) {
+ LLVM_DEBUG(dbgs() << "Call slot: Source accessed by " << *U << "\n");
return false;
+ }
}
// Check whether src is captured by the called function, in which case there
diff --git a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
index 308622615332..224cd24915fa 100644
--- a/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/llvm/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -519,6 +519,7 @@ Instruction *NaryReassociatePass::tryReassociatedBinaryOp(const SCEV *LHSExpr,
default:
llvm_unreachable("Unexpected instruction.");
}
+ NewI->setDebugLoc(I->getDebugLoc());
NewI->takeName(I);
return NewI;
}
diff --git a/llvm/lib/Transforms/Scalar/SROA.cpp b/llvm/lib/Transforms/Scalar/SROA.cpp
index 096c6d1b1fad..756daf5bb41f 100644
--- a/llvm/lib/Transforms/Scalar/SROA.cpp
+++ b/llvm/lib/Transforms/Scalar/SROA.cpp
@@ -630,7 +630,7 @@ public:
int OldSize = Slices.size();
Slices.append(NewSlices.begin(), NewSlices.end());
auto SliceI = Slices.begin() + OldSize;
- llvm::sort(SliceI, Slices.end());
+ std::stable_sort(SliceI, Slices.end());
std::inplace_merge(Slices.begin(), SliceI, Slices.end());
}
@@ -5122,7 +5122,7 @@ bool SROA::splitAlloca(AllocaInst &AI, AllocaSlices &AS) {
}
if (!IsSorted)
- llvm::sort(AS);
+ llvm::stable_sort(AS);
/// Describes the allocas introduced by rewritePartition in order to migrate
/// the debug info.
diff --git a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
index 7ac1f43b7b6a..08ba08daa9d9 100644
--- a/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
+++ b/llvm/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp
@@ -972,14 +972,9 @@ SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
bool SeparateConstOffsetFromGEP::reorderGEP(GetElementPtrInst *GEP,
TargetTransformInfo &TTI) {
- if (GEP->getNumIndices() != 1)
- return false;
-
auto PtrGEP = dyn_cast<GetElementPtrInst>(GEP->getPointerOperand());
if (!PtrGEP)
return false;
- if (PtrGEP->getNumIndices() != 1)
- return false;
bool NestedNeedsExtraction;
int64_t NestedByteOffset =
@@ -997,14 +992,12 @@ bool SeparateConstOffsetFromGEP::reorderGEP(GetElementPtrInst *GEP,
bool PtrGEPInBounds = PtrGEP->isInBounds();
bool IsChainInBounds = GEPInBounds && PtrGEPInBounds;
if (IsChainInBounds) {
- auto GEPIdx = GEP->indices().begin();
- auto KnownGEPIdx = computeKnownBits(GEPIdx->get(), *DL);
- IsChainInBounds &= KnownGEPIdx.isNonNegative();
- if (IsChainInBounds) {
- auto PtrGEPIdx = PtrGEP->indices().begin();
- auto KnownPtrGEPIdx = computeKnownBits(PtrGEPIdx->get(), *DL);
- IsChainInBounds &= KnownPtrGEPIdx.isNonNegative();
- }
+ auto IsKnownNonNegative = [this](Value *V) {
+ return isKnownNonNegative(V, *DL);
+ };
+ IsChainInBounds &= all_of(GEP->indices(), IsKnownNonNegative);
+ if (IsChainInBounds)
+ IsChainInBounds &= all_of(PtrGEP->indices(), IsKnownNonNegative);
}
IRBuilder<> Builder(GEP);
diff --git a/llvm/lib/Transforms/Utils/CloneFunction.cpp b/llvm/lib/Transforms/Utils/CloneFunction.cpp
index 981183682b8b..1fef8bc46121 100644
--- a/llvm/lib/Transforms/Utils/CloneFunction.cpp
+++ b/llvm/lib/Transforms/Utils/CloneFunction.cpp
@@ -825,13 +825,6 @@ void llvm::CloneAndPruneIntoFromInst(Function *NewFunc, const Function *OldFunc,
if (!NewI)
continue;
- // Skip over non-intrinsic callsites, we don't want to remove any nodes
- // from the CGSCC.
- CallBase *CB = dyn_cast<CallBase>(NewI);
- if (CB && CB->getCalledFunction() &&
- !CB->getCalledFunction()->isIntrinsic())
- continue;
-
if (Value *V = simplifyInstruction(NewI, DL)) {
NewI->replaceAllUsesWith(V);
diff --git a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
index fd652cb78954..6d64aaa75922 100644
--- a/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
+++ b/llvm/lib/Transforms/Vectorize/LoopVectorize.cpp
@@ -5824,6 +5824,10 @@ void LoopVectorizationCostModel::collectInstsToScalarize(ElementCount VF) {
ScalarCostsVF.insert(ScalarCosts.begin(), ScalarCosts.end());
// Remember that BB will remain after vectorization.
PredicatedBBsAfterVectorization[VF].insert(BB);
+ for (auto *Pred : predecessors(BB)) {
+ if (Pred->getSingleSuccessor() == BB)
+ PredicatedBBsAfterVectorization[VF].insert(Pred);
+ }
}
}
}
diff --git a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
index 56cbaa420129..058746880743 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
+++ b/llvm/lib/Transforms/Vectorize/VPlanPatternMatch.h
@@ -157,7 +157,7 @@ using AllUnaryRecipe_match =
UnaryRecipe_match<Op0_t, Opcode, VPWidenRecipe, VPReplicateRecipe,
VPWidenCastRecipe, VPInstruction>;
-template <typename Op0_t, typename Op1_t, unsigned Opcode,
+template <typename Op0_t, typename Op1_t, unsigned Opcode, bool Commutative,
typename... RecipeTys>
struct BinaryRecipe_match {
Op0_t Op0;
@@ -179,18 +179,23 @@ struct BinaryRecipe_match {
return false;
assert(R->getNumOperands() == 2 &&
"recipe with matched opcode does not have 2 operands");
- return Op0.match(R->getOperand(0)) && Op1.match(R->getOperand(1));
+ if (Op0.match(R->getOperand(0)) && Op1.match(R->getOperand(1)))
+ return true;
+ return Commutative && Op0.match(R->getOperand(1)) &&
+ Op1.match(R->getOperand(0));
}
};
template <typename Op0_t, typename Op1_t, unsigned Opcode>
using BinaryVPInstruction_match =
- BinaryRecipe_match<Op0_t, Op1_t, Opcode, VPInstruction>;
+ BinaryRecipe_match<Op0_t, Op1_t, Opcode, /*Commutative*/ false,
+ VPInstruction>;
-template <typename Op0_t, typename Op1_t, unsigned Opcode>
+template <typename Op0_t, typename Op1_t, unsigned Opcode,
+ bool Commutative = false>
using AllBinaryRecipe_match =
- BinaryRecipe_match<Op0_t, Op1_t, Opcode, VPWidenRecipe, VPReplicateRecipe,
- VPWidenCastRecipe, VPInstruction>;
+ BinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative, VPWidenRecipe,
+ VPReplicateRecipe, VPWidenCastRecipe, VPInstruction>;
template <unsigned Opcode, typename Op0_t>
inline UnaryVPInstruction_match<Op0_t, Opcode>
@@ -256,10 +261,11 @@ m_ZExtOrSExt(const Op0_t &Op0) {
return m_CombineOr(m_ZExt(Op0), m_SExt(Op0));
}
-template <unsigned Opcode, typename Op0_t, typename Op1_t>
-inline AllBinaryRecipe_match<Op0_t, Op1_t, Opcode> m_Binary(const Op0_t &Op0,
- const Op1_t &Op1) {
- return AllBinaryRecipe_match<Op0_t, Op1_t, Opcode>(Op0, Op1);
+template <unsigned Opcode, typename Op0_t, typename Op1_t,
+ bool Commutative = false>
+inline AllBinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative>
+m_Binary(const Op0_t &Op0, const Op1_t &Op1) {
+ return AllBinaryRecipe_match<Op0_t, Op1_t, Opcode, Commutative>(Op0, Op1);
}
template <typename Op0_t, typename Op1_t>
@@ -268,10 +274,21 @@ m_Mul(const Op0_t &Op0, const Op1_t &Op1) {
return m_Binary<Instruction::Mul, Op0_t, Op1_t>(Op0, Op1);
}
-template <typename Op0_t, typename Op1_t>
-inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or>
+/// Match a binary OR operation. Note that while conceptually the operands can
+/// be matched commutatively, \p Commutative defaults to false in line with the
+/// IR-based pattern matching infrastructure. Use m_c_BinaryOr for a commutative
+/// version of the matcher.
+template <typename Op0_t, typename Op1_t, bool Commutative = false>
+inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or, Commutative>
m_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
- return m_Binary<Instruction::Or, Op0_t, Op1_t>(Op0, Op1);
+ return m_Binary<Instruction::Or, Op0_t, Op1_t, Commutative>(Op0, Op1);
+}
+
+template <typename Op0_t, typename Op1_t>
+inline AllBinaryRecipe_match<Op0_t, Op1_t, Instruction::Or,
+ /*Commutative*/ true>
+m_c_BinaryOr(const Op0_t &Op0, const Op1_t &Op1) {
+ return m_BinaryOr<Op0_t, Op1_t, /*Commutative*/ true>(Op0, Op1);
}
template <typename Op0_t, typename Op1_t>
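
The idiom behind the new Commutative parameter is small enough to show in isolation; the sketch below uses plain predicates over an operand pair rather than VPValue matchers (all names are illustrative). The matcher first tries the operands in their given order and, only if that fails and commutativity is requested, the swapped order, which is the same fallback as in BinaryRecipe_match::match above.

    #include <cstdio>
    #include <functional>
    #include <utility>

    struct ToyBinaryMatch {
      std::function<bool(int)> M0, M1;
      bool Commutative;

      bool match(const std::pair<int, int> &Ops) const {
        if (M0(Ops.first) && M1(Ops.second))
          return true;
        // Only fall back to the swapped operand order for commutative matches.
        return Commutative && M0(Ops.second) && M1(Ops.first);
      }
    };

    int main() {
      auto IsThree = [](int V) { return V == 3; };
      auto IsEven = [](int V) { return V % 2 == 0; };
      ToyBinaryMatch Strict{IsThree, IsEven, /*Commutative=*/false};
      ToyBinaryMatch Commut{IsThree, IsEven, /*Commutative=*/true};
      std::pair<int, int> Ops{4, 3}; // operands arrive in the "wrong" order
      std::printf("%d %d\n", Strict.match(Ops), Commut.match(Ops)); // 0 1
    }
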
diff --git a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
index 4c968c2834b1..7ff8d8e0ea15 100644
--- a/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
+++ b/llvm/lib/Transforms/Vectorize/VPlanTransforms.cpp
@@ -941,8 +941,8 @@ static void simplifyRecipe(VPRecipeBase &R, VPTypeAnalysis &TypeInfo) {
// recipes to be visited during simplification.
VPValue *X, *Y, *X1, *Y1;
if (match(&R,
- m_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
- m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
+ m_c_BinaryOr(m_LogicalAnd(m_VPValue(X), m_VPValue(Y)),
+ m_LogicalAnd(m_VPValue(X1), m_Not(m_VPValue(Y1))))) &&
X == X1 && Y == Y1) {
R.getVPSingleValue()->replaceAllUsesWith(X);
return;
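
The rewrite above relies on the Boolean identity (X && Y) || (X && !Y) == X; switching to the commutative matcher only widens which operand order of the OR gets recognized. A quick standalone check of the two-valued identity (it deliberately ignores the poison/undef considerations the recipes themselves handle):

    #include <cassert>

    int main() {
      // Exhaustively verify (X && Y) || (X && !Y) == X over plain bools.
      for (bool X : {false, true})
        for (bool Y : {false, true})
          assert(((X && Y) || (X && !Y)) == X);
      return 0;
    }
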
diff --git a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
index 15deaf908422..b5a292841172 100644
--- a/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
+++ b/llvm/lib/Transforms/Vectorize/VectorCombine.cpp
@@ -1668,6 +1668,86 @@ bool VectorCombine::foldShuffleOfShuffles(Instruction &I) {
return true;
}
+using InstLane = std::pair<Value *, int>;
+
+static InstLane lookThroughShuffles(Value *V, int Lane) {
+ while (auto *SV = dyn_cast<ShuffleVectorInst>(V)) {
+ unsigned NumElts =
+ cast<FixedVectorType>(SV->getOperand(0)->getType())->getNumElements();
+ int M = SV->getMaskValue(Lane);
+ if (M < 0)
+ return {nullptr, PoisonMaskElem};
+ if (static_cast<unsigned>(M) < NumElts) {
+ V = SV->getOperand(0);
+ Lane = M;
+ } else {
+ V = SV->getOperand(1);
+ Lane = M - NumElts;
+ }
+ }
+ return InstLane{V, Lane};
+}
+
+static SmallVector<InstLane>
+generateInstLaneVectorFromOperand(ArrayRef<InstLane> Item, int Op) {
+ SmallVector<InstLane> NItem;
+ for (InstLane IL : Item) {
+ auto [V, Lane] = IL;
+ InstLane OpLane =
+ V ? lookThroughShuffles(cast<Instruction>(V)->getOperand(Op), Lane)
+ : InstLane{nullptr, PoisonMaskElem};
+ NItem.emplace_back(OpLane);
+ }
+ return NItem;
+}
+
+static Value *generateNewInstTree(ArrayRef<InstLane> Item, FixedVectorType *Ty,
+ const SmallPtrSet<Value *, 4> &IdentityLeafs,
+ const SmallPtrSet<Value *, 4> &SplatLeafs,
+ IRBuilder<> &Builder) {
+ auto [FrontV, FrontLane] = Item.front();
+
+ if (IdentityLeafs.contains(FrontV) &&
+ all_of(drop_begin(enumerate(Item)), [Item](const auto &E) {
+ Value *FrontV = Item.front().first;
+ auto [V, Lane] = E.value();
+ return !V || (V == FrontV && Lane == (int)E.index());
+ })) {
+ return FrontV;
+ }
+ if (SplatLeafs.contains(FrontV)) {
+ if (auto *ILI = dyn_cast<Instruction>(FrontV))
+ Builder.SetInsertPoint(*ILI->getInsertionPointAfterDef());
+ else if (auto *Arg = dyn_cast<Argument>(FrontV))
+ Builder.SetInsertPointPastAllocas(Arg->getParent());
+ SmallVector<int, 16> Mask(Ty->getNumElements(), FrontLane);
+ return Builder.CreateShuffleVector(FrontV, Mask);
+ }
+
+ auto *I = cast<Instruction>(FrontV);
+ auto *II = dyn_cast<IntrinsicInst>(I);
+ unsigned NumOps = I->getNumOperands() - (II ? 1 : 0);
+ SmallVector<Value *> Ops(NumOps);
+ for (unsigned Idx = 0; Idx < NumOps; Idx++) {
+ if (II && isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Idx)) {
+ Ops[Idx] = II->getOperand(Idx);
+ continue;
+ }
+ Ops[Idx] = generateNewInstTree(generateInstLaneVectorFromOperand(Item, Idx),
+ Ty, IdentityLeafs, SplatLeafs, Builder);
+ }
+ Builder.SetInsertPoint(I);
+ Type *DstTy =
+ FixedVectorType::get(I->getType()->getScalarType(), Ty->getNumElements());
+ if (auto *BI = dyn_cast<BinaryOperator>(I))
+ return Builder.CreateBinOp((Instruction::BinaryOps)BI->getOpcode(), Ops[0],
+ Ops[1]);
+ if (II)
+ return Builder.CreateIntrinsic(DstTy, II->getIntrinsicID(), Ops);
+ assert(isa<UnaryInstruction>(I) && "Unexpected instruction type in Generate");
+ return Builder.CreateUnOp((Instruction::UnaryOps)I->getOpcode(), Ops[0]);
+}
+
// Starting from a shuffle, look up through operands tracking the shuffled index
// of each lane. If we can simplify away the shuffles to identities then
// do so.
@@ -1677,42 +1757,9 @@ bool VectorCombine::foldShuffleToIdentity(Instruction &I) {
!isa<Instruction>(I.getOperand(1)))
return false;
- using InstLane = std::pair<Value *, int>;
-
- auto LookThroughShuffles = [](Value *V, int Lane) -> InstLane {
- while (auto *SV = dyn_cast<ShuffleVectorInst>(V)) {
- unsigned NumElts =
- cast<FixedVectorType>(SV->getOperand(0)->getType())->getNumElements();
- int M = SV->getMaskValue(Lane);
- if (M < 0)
- return {nullptr, PoisonMaskElem};
- else if (M < (int)NumElts) {
- V = SV->getOperand(0);
- Lane = M;
- } else {
- V = SV->getOperand(1);
- Lane = M - NumElts;
- }
- }
- return InstLane{V, Lane};
- };
-
- auto GenerateInstLaneVectorFromOperand =
- [&LookThroughShuffles](ArrayRef<InstLane> Item, int Op) {
- SmallVector<InstLane> NItem;
- for (InstLane V : Item) {
- NItem.emplace_back(
- !V.first
- ? InstLane{nullptr, PoisonMaskElem}
- : LookThroughShuffles(
- cast<Instruction>(V.first)->getOperand(Op), V.second));
- }
- return NItem;
- };
-
SmallVector<InstLane> Start(Ty->getNumElements());
for (unsigned M = 0, E = Ty->getNumElements(); M < E; ++M)
- Start[M] = LookThroughShuffles(&I, M);
+ Start[M] = lookThroughShuffles(&I, M);
SmallVector<SmallVector<InstLane>> Worklist;
Worklist.push_back(Start);
@@ -1720,74 +1767,80 @@ bool VectorCombine::foldShuffleToIdentity(Instruction &I) {
unsigned NumVisited = 0;
while (!Worklist.empty()) {
- SmallVector<InstLane> Item = Worklist.pop_back_val();
if (++NumVisited > MaxInstrsToScan)
return false;
+ SmallVector<InstLane> Item = Worklist.pop_back_val();
+ auto [FrontV, FrontLane] = Item.front();
+
// If we found an undef first lane then bail out to keep things simple.
- if (!Item[0].first)
+ if (!FrontV)
return false;
// Look for an identity value.
- if (Item[0].second == 0 &&
- cast<FixedVectorType>(Item[0].first->getType())->getNumElements() ==
+ if (!FrontLane &&
+ cast<FixedVectorType>(FrontV->getType())->getNumElements() ==
Ty->getNumElements() &&
- all_of(drop_begin(enumerate(Item)), [&](const auto &E) {
- return !E.value().first || (E.value().first == Item[0].first &&
+ all_of(drop_begin(enumerate(Item)), [Item](const auto &E) {
+ Value *FrontV = Item.front().first;
+ return !E.value().first || (E.value().first == FrontV &&
E.value().second == (int)E.index());
})) {
- IdentityLeafs.insert(Item[0].first);
+ IdentityLeafs.insert(FrontV);
continue;
}
// Look for a splat value.
- if (all_of(drop_begin(Item), [&](InstLane &IL) {
- return !IL.first ||
- (IL.first == Item[0].first && IL.second == Item[0].second);
+ if (all_of(drop_begin(Item), [Item](InstLane &IL) {
+ auto [FrontV, FrontLane] = Item.front();
+ auto [V, Lane] = IL;
+ return !V || (V == FrontV && Lane == FrontLane);
})) {
- SplatLeafs.insert(Item[0].first);
+ SplatLeafs.insert(FrontV);
continue;
}
// We need each element to be the same type of value, and check that each
// element has a single use.
- if (!all_of(drop_begin(Item), [&](InstLane IL) {
- if (!IL.first)
+ if (!all_of(drop_begin(Item), [Item](InstLane IL) {
+ Value *FrontV = Item.front().first;
+ Value *V = IL.first;
+ if (!V)
return true;
- if (auto *I = dyn_cast<Instruction>(IL.first); I && !I->hasOneUse())
+ if (auto *I = dyn_cast<Instruction>(V); I && !I->hasOneUse())
return false;
- if (IL.first->getValueID() != Item[0].first->getValueID())
+ if (V->getValueID() != FrontV->getValueID())
return false;
- if (isa<CallInst>(IL.first) && !isa<IntrinsicInst>(IL.first))
+ if (isa<CallInst>(V) && !isa<IntrinsicInst>(V))
return false;
- auto *II = dyn_cast<IntrinsicInst>(IL.first);
- return !II ||
- (isa<IntrinsicInst>(Item[0].first) &&
- II->getIntrinsicID() ==
- cast<IntrinsicInst>(Item[0].first)->getIntrinsicID());
+ auto *II = dyn_cast<IntrinsicInst>(V);
+ return !II || (isa<IntrinsicInst>(FrontV) &&
+ II->getIntrinsicID() ==
+ cast<IntrinsicInst>(FrontV)->getIntrinsicID());
}))
return false;
// Check the operator is one that we support. We exclude div/rem in case
// they hit UB from poison lanes.
- if (isa<BinaryOperator>(Item[0].first) &&
- !cast<BinaryOperator>(Item[0].first)->isIntDivRem()) {
- Worklist.push_back(GenerateInstLaneVectorFromOperand(Item, 0));
- Worklist.push_back(GenerateInstLaneVectorFromOperand(Item, 1));
- } else if (isa<UnaryOperator>(Item[0].first)) {
- Worklist.push_back(GenerateInstLaneVectorFromOperand(Item, 0));
- } else if (auto *II = dyn_cast<IntrinsicInst>(Item[0].first);
+ if (isa<BinaryOperator>(FrontV) &&
+ !cast<BinaryOperator>(FrontV)->isIntDivRem()) {
+ Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
+ Worklist.push_back(generateInstLaneVectorFromOperand(Item, 1));
+ } else if (isa<UnaryOperator>(FrontV)) {
+ Worklist.push_back(generateInstLaneVectorFromOperand(Item, 0));
+ } else if (auto *II = dyn_cast<IntrinsicInst>(FrontV);
II && isTriviallyVectorizable(II->getIntrinsicID())) {
for (unsigned Op = 0, E = II->getNumOperands() - 1; Op < E; Op++) {
if (isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Op)) {
- if (!all_of(drop_begin(Item), [&](InstLane &IL) {
- return !IL.first ||
- (cast<Instruction>(IL.first)->getOperand(Op) ==
- cast<Instruction>(Item[0].first)->getOperand(Op));
+ if (!all_of(drop_begin(Item), [Item, Op](InstLane &IL) {
+ Value *FrontV = Item.front().first;
+ Value *V = IL.first;
+ return !V || (cast<Instruction>(V)->getOperand(Op) ==
+ cast<Instruction>(FrontV)->getOperand(Op));
}))
return false;
continue;
}
- Worklist.push_back(GenerateInstLaneVectorFromOperand(Item, Op));
+ Worklist.push_back(generateInstLaneVectorFromOperand(Item, Op));
}
} else {
return false;
@@ -1799,49 +1852,7 @@ bool VectorCombine::foldShuffleToIdentity(Instruction &I) {
// If we got this far, we know the shuffles are superfluous and can be
// removed. Scan through again and generate the new tree of instructions.
- std::function<Value *(ArrayRef<InstLane>)> Generate =
- [&](ArrayRef<InstLane> Item) -> Value * {
- if (IdentityLeafs.contains(Item[0].first) &&
- all_of(drop_begin(enumerate(Item)), [&](const auto &E) {
- return !E.value().first || (E.value().first == Item[0].first &&
- E.value().second == (int)E.index());
- })) {
- return Item[0].first;
- }
- if (SplatLeafs.contains(Item[0].first)) {
- if (auto ILI = dyn_cast<Instruction>(Item[0].first))
- Builder.SetInsertPoint(*ILI->getInsertionPointAfterDef());
- else if (isa<Argument>(Item[0].first))
- Builder.SetInsertPointPastAllocas(I.getParent()->getParent());
- SmallVector<int, 16> Mask(Ty->getNumElements(), Item[0].second);
- return Builder.CreateShuffleVector(Item[0].first, Mask);
- }
-
- auto *I = cast<Instruction>(Item[0].first);
- auto *II = dyn_cast<IntrinsicInst>(I);
- unsigned NumOps = I->getNumOperands() - (II ? 1 : 0);
- SmallVector<Value *> Ops(NumOps);
- for (unsigned Idx = 0; Idx < NumOps; Idx++) {
- if (II && isVectorIntrinsicWithScalarOpAtArg(II->getIntrinsicID(), Idx)) {
- Ops[Idx] = II->getOperand(Idx);
- continue;
- }
- Ops[Idx] = Generate(GenerateInstLaneVectorFromOperand(Item, Idx));
- }
- Builder.SetInsertPoint(I);
- Type *DstTy = FixedVectorType::get(I->getType()->getScalarType(),
- Ty->getNumElements());
- if (auto BI = dyn_cast<BinaryOperator>(I))
- return Builder.CreateBinOp((Instruction::BinaryOps)BI->getOpcode(),
- Ops[0], Ops[1]);
- if (II)
- return Builder.CreateIntrinsic(DstTy, II->getIntrinsicID(), Ops);
- assert(isa<UnaryInstruction>(I) &&
- "Unexpected instruction type in Generate");
- return Builder.CreateUnOp((Instruction::UnaryOps)I->getOpcode(), Ops[0]);
- };
-
- Value *V = Generate(Start);
+ Value *V = generateNewInstTree(Start, Ty, IdentityLeafs, SplatLeafs, Builder);
replaceValue(I, *V);
return true;
}
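
The lane-tracking walk that both lookThroughShuffles and the worklist loop above depend on can be sketched in isolation with a toy shuffle node (the type and helper names below are illustrative, not the LLVM classes): a mask entry M picks lane M of the first source when M is below the first source's element count, lane M minus that count of the second source otherwise, and a negative entry marks a poison lane.

    #include <cstdio>
    #include <utility>
    #include <vector>

    struct ToyShuffle {
      const ToyShuffle *Src0 = nullptr; // null sources mark a leaf vector
      const ToyShuffle *Src1 = nullptr;
      unsigned NumElts = 0;             // element count of Src0
      std::vector<int> Mask;            // empty for leaves
    };

    // Follow a single lane through a chain of shuffles back to a leaf value,
    // or report a poison lane as {nullptr, -1}.
    static std::pair<const ToyShuffle *, int>
    lookThroughToyShuffles(const ToyShuffle *V, int Lane) {
      while (V && !V->Mask.empty()) {
        int M = V->Mask[Lane];
        if (M < 0)
          return {nullptr, -1};
        if (static_cast<unsigned>(M) < V->NumElts) {
          Lane = M;
          V = V->Src0;
        } else {
          Lane = M - static_cast<int>(V->NumElts);
          V = V->Src1;
        }
      }
      return {V, Lane};
    }

    int main() {
      ToyShuffle A, B;                        // two 4-element leaf vectors
      ToyShuffle S{&A, &B, 4, {6, 1, -1, 2}};
      auto [Leaf, Lane] = lookThroughToyShuffles(&S, 0);
      std::printf("%s lane %d\n", Leaf == &B ? "B" : "other", Lane); // B lane 2
    }
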
diff --git a/llvm/test/Analysis/CostModel/AMDGPU/shufflevector.ll b/llvm/test/Analysis/CostModel/AMDGPU/shufflevector.ll
index be5cca0765ed..a18156744a36 100644
--- a/llvm/test/Analysis/CostModel/AMDGPU/shufflevector.ll
+++ b/llvm/test/Analysis/CostModel/AMDGPU/shufflevector.ll
@@ -7,603 +7,1140 @@
; RUN: opt < %s -passes="print<cost-model>" 2>&1 -disable-output -mtriple=amdgcn-unknown-amdhsa -mcpu=fiji -cost-kind=code-size -S | FileCheck -check-prefixes=ALL-SIZE,VI-SIZE %s
; END.
-define amdgpu_kernel void @shufflevector_i16() {
+define amdgpu_kernel void @shufflevector_i16(<2 x i16> %vec1, <2 x i16> %vec2) {
; GFX9-10-LABEL: 'shufflevector_i16'
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> zeroinitializer
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 3>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 3>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 3>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 3>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf000 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> zeroinitializer
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf222 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> zeroinitializer
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> zeroinitializer
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> zeroinitializer
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 3>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> zeroinitializer
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; GFX9-10-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; VI-LABEL: 'shufflevector_i16'
-; VI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf00 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> zeroinitializer
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf22 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 3>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 3>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 3>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 3>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf000 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> zeroinitializer
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf222 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf00 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> zeroinitializer
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf22 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> zeroinitializer
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf00_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> zeroinitializer
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf22_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 3>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> zeroinitializer
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; VI-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; GFX9-10-SIZE-LABEL: 'shufflevector_i16'
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> zeroinitializer
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 3>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 3>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 3>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 3>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf000 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> zeroinitializer
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf222 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> zeroinitializer
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> zeroinitializer
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> zeroinitializer
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 3>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> zeroinitializer
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
; VI-SIZE-LABEL: 'shufflevector_i16'
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf00 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> zeroinitializer
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf22 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 3>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 3>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 3>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 3>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf000 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> zeroinitializer
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf222 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf00 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> zeroinitializer
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf22 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> zeroinitializer
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf00_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> zeroinitializer
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf10_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf11_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf02_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %shuf20_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf22_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf03_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf30_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf33_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf12_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf21_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf13_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf31_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 3>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf32_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> zeroinitializer
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf001_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf010_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf011_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf100_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf101_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf110_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf111_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf002_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf020_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf022_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf200_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf202_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf220_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf112_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf121_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf122_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf211_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf212_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf221_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
- %shuf00 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> zeroinitializer
- %shuf01 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 1>
- %shuf10 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
- %shuf11 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 1>
- %shuf02 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 2>
- %shuf20 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 0>
- %shuf22 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 2>
- %shuf03 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 0, i32 3>
- %shuf30 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 0>
- %shuf33 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 3>
- %shuf12 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 2>
- %shuf21 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 1>
- %shuf13 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 3>
- %shuf31 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 1>
- %shuf23 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 2, i32 3>
- %shuf32 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 3, i32 2>
- %shuf000 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 0>
- %shuf001 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 1>
- %shuf010 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 0>
- %shuf011 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 1, i32 1>
- %shuf100 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 0>
- %shuf101 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 0, i32 1>
- %shuf110 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 0>
- %shuf111 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 1>
- %shuf002 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 0, i32 2>
- %shuf020 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 0>
- %shuf022 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 0, i32 2, i32 2>
- %shuf200 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 0>
- %shuf202 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 0, i32 2>
- %shuf220 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 0>
- %shuf222 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 2>
- %shuf112 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 1, i32 2>
- %shuf121 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 1>
- %shuf122 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 1, i32 2, i32 2>
- %shuf211 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 1>
- %shuf212 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 1, i32 2>
- %shuf221 = shufflevector <2 x i16> undef, <2 x i16> undef, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> zeroinitializer
+ %shuf01 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 1>
+ %shuf10 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 0>
+ %shuf11 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 1>
+ %shuf02 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 2>
+ %shuf20 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 0>
+ %shuf22 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 2>
+ %shuf03 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 0, i32 3>
+ %shuf30 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 0>
+ %shuf33 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 3>
+ %shuf12 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 2>
+ %shuf21 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 1>
+ %shuf13 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 1, i32 3>
+ %shuf31 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 1>
+ %shuf23 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 2, i32 3>
+ %shuf32 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <2 x i32> <i32 3, i32 2>
+ %shuf000 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221 = shufflevector <2 x i16> %vec1, <2 x i16> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> zeroinitializer
+ %shuf01_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 1>
+ %shuf10_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 0>
+ %shuf11_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 1>
+ %shuf02_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 2>
+ %shuf20_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 0>
+ %shuf22_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 2>
+ %shuf03_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 0, i32 3>
+ %shuf30_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 0>
+ %shuf33_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 3>
+ %shuf12_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 2>
+ %shuf21_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 1>
+ %shuf13_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 1, i32 3>
+ %shuf31_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 1>
+ %shuf23_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 2, i32 3>
+ %shuf32_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <2 x i32> <i32 3, i32 2>
+ %shuf000_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221_2 = shufflevector <2 x i16> %vec1, <2 x i16> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
ret void
}
; Should not assert
-define amdgpu_kernel void @shufflevector_i8() {
+define amdgpu_kernel void @shufflevector_i8(<2 x i8> %vec1, <2 x i8> %vec2) {
; ALL-LABEL: 'shufflevector_i8'
-; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> zeroinitializer
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> zeroinitializer
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf000 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf222 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf000_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf222_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; ALL-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; ALL-SIZE-LABEL: 'shufflevector_i8'
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> zeroinitializer
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf000 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> zeroinitializer
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf222 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf000 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf222 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf00_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf10_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf11_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf02_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %shuf20_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %shuf22_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf03_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf30_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf33_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf12_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf21_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf13_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf31_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %shuf32_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf000_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf001_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf010_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf011_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf100_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf101_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf110_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf111_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf002_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf020_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf022_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf200_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf202_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf220_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf222_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf112_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf121_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf122_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf211_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf212_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %shuf221_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
- %shuf00 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> zeroinitializer
- %shuf01 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 1>
- %shuf10 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
- %shuf11 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 1>
- %shuf02 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 2>
- %shuf20 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 0>
- %shuf22 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 2>
- %shuf03 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 0, i32 3>
- %shuf30 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 0>
- %shuf33 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 3>
- %shuf12 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 2>
- %shuf21 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 1>
- %shuf13 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 3>
- %shuf31 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 1>
- %shuf23 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 2, i32 3>
- %shuf32 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 3, i32 2>
- %shuf000 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 0>
- %shuf001 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 1>
- %shuf010 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 0>
- %shuf011 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 1, i32 1>
- %shuf100 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 0>
- %shuf101 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 0, i32 1>
- %shuf110 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 0>
- %shuf111 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 1>
- %shuf002 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 0, i32 2>
- %shuf020 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 0>
- %shuf022 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 0, i32 2, i32 2>
- %shuf200 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 0>
- %shuf202 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 0, i32 2>
- %shuf220 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 0>
- %shuf222 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 2>
- %shuf112 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 1, i32 2>
- %shuf121 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 1>
- %shuf122 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 1, i32 2, i32 2>
- %shuf211 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 1>
- %shuf212 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 1, i32 2>
- %shuf221 = shufflevector <2 x i8> undef, <2 x i8> undef, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> zeroinitializer
+ %shuf01 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 1>
+ %shuf10 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 0>
+ %shuf11 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 1>
+ %shuf02 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 2>
+ %shuf20 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 0>
+ %shuf22 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 2>
+ %shuf03 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 0, i32 3>
+ %shuf30 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 0>
+ %shuf33 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 3>
+ %shuf12 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 2>
+ %shuf21 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 1>
+ %shuf13 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 1, i32 3>
+ %shuf31 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 1>
+ %shuf23 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 2, i32 3>
+ %shuf32 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <2 x i32> <i32 3, i32 2>
+ %shuf000 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221 = shufflevector <2 x i8> %vec1, <2 x i8> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> zeroinitializer
+ %shuf01_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 1>
+ %shuf10_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 0>
+ %shuf11_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 1>
+ %shuf02_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 2>
+ %shuf20_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 0>
+ %shuf22_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 2>
+ %shuf03_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 0, i32 3>
+ %shuf30_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 0>
+ %shuf33_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 3>
+ %shuf12_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 2>
+ %shuf21_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 1>
+ %shuf13_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 1, i32 3>
+ %shuf31_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 1>
+ %shuf23_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 2, i32 3>
+ %shuf32_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <2 x i32> <i32 3, i32 2>
+ %shuf000_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221_2 = shufflevector <2 x i8> %vec1, <2 x i8> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
ret void
}
-define amdgpu_kernel void @shufflevector_i32() {
+define amdgpu_kernel void @shufflevector_i32(<2 x i32> %vec1, <2 x i32> %vec2) {
; ALL-LABEL: 'shufflevector_i32'
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> zeroinitializer
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> zeroinitializer
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> zeroinitializer
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; ALL-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; ALL-SIZE-LABEL: 'shufflevector_i32'
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> zeroinitializer
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> zeroinitializer
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 0>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 1>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 2>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf00_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf01_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf10_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf11_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf02_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf20_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf22_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf03_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf30_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf33_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf12_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf21_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf13_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf31_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf23_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf32_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf000_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> zeroinitializer
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf001_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf010_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf011_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf100_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf101_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf110_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf111_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf002_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf020_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf022_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf200_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf202_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf220_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf222_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf112_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf121_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf122_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf211_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf212_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %shuf221_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
- %shuf00 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> zeroinitializer
- %shuf01 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 1>
- %shuf10 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
- %shuf11 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 1>
- %shuf02 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 2>
- %shuf20 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 0>
- %shuf22 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 2>
- %shuf03 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 0, i32 3>
- %shuf30 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 0>
- %shuf33 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 3>
- %shuf12 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 2>
- %shuf21 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 1>
- %shuf13 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 3>
- %shuf31 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 1>
- %shuf23 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 2, i32 3>
- %shuf32 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 3, i32 2>
- %shuf000 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 0>
- %shuf001 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 1>
- %shuf010 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 0>
- %shuf011 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 1, i32 1>
- %shuf100 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 0>
- %shuf101 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 0, i32 1>
- %shuf110 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 0>
- %shuf111 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 1>
- %shuf002 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 0, i32 2>
- %shuf020 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 0>
- %shuf022 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 0, i32 2, i32 2>
- %shuf200 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 0>
- %shuf202 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 0, i32 2>
- %shuf220 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 0>
- %shuf222 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 2>
- %shuf112 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 1, i32 2>
- %shuf121 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 1>
- %shuf122 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 1, i32 2, i32 2>
- %shuf211 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 1>
- %shuf212 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 1, i32 2>
- %shuf221 = shufflevector <2 x i32> undef, <2 x i32> undef, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> zeroinitializer
+ %shuf01 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 1>
+ %shuf10 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 0>
+ %shuf11 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 1>
+ %shuf02 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 2>
+ %shuf20 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 0>
+ %shuf22 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 2>
+ %shuf03 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 0, i32 3>
+ %shuf30 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 0>
+ %shuf33 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 3>
+ %shuf12 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 2>
+ %shuf21 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 1>
+ %shuf13 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 1, i32 3>
+ %shuf31 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 1>
+ %shuf23 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 2, i32 3>
+ %shuf32 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <2 x i32> <i32 3, i32 2>
+ %shuf000 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221 = shufflevector <2 x i32> %vec1, <2 x i32> %vec1, <3 x i32> <i32 2, i32 2, i32 1>
+ %shuf00_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> zeroinitializer
+ %shuf01_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 1>
+ %shuf10_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 0>
+ %shuf11_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 1>
+ %shuf02_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 2>
+ %shuf20_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 0>
+ %shuf22_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 2>
+ %shuf03_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 0, i32 3>
+ %shuf30_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 0>
+ %shuf33_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 3>
+ %shuf12_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 2>
+ %shuf21_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 1>
+ %shuf13_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 1, i32 3>
+ %shuf31_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 1>
+ %shuf23_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 2, i32 3>
+ %shuf32_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <2 x i32> <i32 3, i32 2>
+ %shuf000_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 0>
+ %shuf001_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 1>
+ %shuf010_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 0>
+ %shuf011_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 1, i32 1>
+ %shuf100_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 0>
+ %shuf101_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 0, i32 1>
+ %shuf110_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 0>
+ %shuf111_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 1>
+ %shuf002_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 0, i32 2>
+ %shuf020_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 0>
+ %shuf022_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 0, i32 2, i32 2>
+ %shuf200_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 0>
+ %shuf202_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 0, i32 2>
+ %shuf220_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 0>
+ %shuf222_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 2>
+ %shuf112_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 1, i32 2>
+ %shuf121_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 1>
+ %shuf122_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 1, i32 2, i32 2>
+ %shuf211_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 1>
+ %shuf212_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 1, i32 2>
+ %shuf221_2 = shufflevector <2 x i32> %vec1, <2 x i32> %vec2, <3 x i32> <i32 2, i32 2, i32 1>
ret void
}
; Other shuffle cases
-define void @shuffle() {
+define void @shuffle(<2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i8> %i8v4, <4 x i8> %i8v4_2, <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i8> %i8v8, <8 x i8> %i8v8_2, <16 x i8> %i8v16, <16 x i8> %i8v16_2, <2 x i16> %i16v2, <2 x i16> %i16v2_2, <4 x i16> %i16v4, <4 x i16> %i16v4_2, <8 x i16> %i16v8, <8 x i16> %i16v8_2, <2 x i32> %i32v2, <2 x i32> %i32v2_2, <4 x i32> %i32v4, <4 x i32> %i32v4_2, <2 x float> %floatv2, <2 x float> %floatv2_2, <4 x float> %floatv4, <4 x float> %floatv4_2, <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x double> %doublev2, <2 x double> %doublev2_2) {
; GFX9-10-LABEL: 'shuffle'
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> undef, <4 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> undef, <2 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> undef, <6 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> undef, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i16_4 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v8i16_8 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8_2 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16_2 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16_2, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <2 x i32> <i32 1, i32 0>
; GFX9-10-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; VI-LABEL: 'shuffle'
-; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> undef, <4 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> undef, <2 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> undef, <6 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> undef, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i16_4 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v8i16_8 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8_2 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16_2 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16_2, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <2 x i32> <i32 1, i32 0>
+; VI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <2 x i32> <i32 1, i32 0>
; VI-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; GFX9-10-SIZE-LABEL: 'shuffle'
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> undef, <4 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> undef, <2 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> undef, <6 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> undef, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i16_4 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v8i16_8 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8_2 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16_2 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16_2, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i16_2_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <2 x i32> <i32 1, i32 0>
+; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <2 x i32> <i32 1, i32 0>
; GFX9-10-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
; VI-SIZE-LABEL: 'shuffle'
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> undef, <4 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> undef, <2 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> undef, <6 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> undef, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %v4i16_4 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 14 for instruction: %v8i16_8 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
-; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v2i8_2_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v2i8_4_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v4i8_4_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v2i8_8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v4i8_8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v6i8_8_2 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v8i8_8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 32 for instruction: %v16i8_16_2 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16_2, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %v2i16_2_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i16_4_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i16_8_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i32_2_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_4_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f32_2_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_4_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2i64_2_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <2 x i32> <i32 1, i32 0>
+; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v2f64_2_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <2 x i32> <i32 1, i32 0>
; VI-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
- %v2i8_2 = shufflevector <2 x i8> undef, <2 x i8> undef, <2 x i32> <i32 1, i32 0>
- %v2i8_4 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
- %v4i8_4 = shufflevector <4 x i8> undef, <4 x i8> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
- %v2i8_8 = shufflevector <2 x i8> undef, <2 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v4i8_8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v6i8_8 = shufflevector <6 x i8> undef, <6 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v8i8_8 = shufflevector <8 x i8> undef, <8 x i8> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v16i8_16 = shufflevector <16 x i8> undef, <16 x i8> undef, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v2i16_2 = shufflevector <2 x i16> undef, <2 x i16> undef, <2 x i32> <i32 1, i32 0>
- %v4i16_4 = shufflevector <4 x i16> undef, <4 x i16> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
- %v8i16_8 = shufflevector <8 x i16> undef, <8 x i16> undef, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
- %v2i32_2 = shufflevector <2 x i32> undef, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
- %v4i32_4 = shufflevector <4 x i32> undef, <4 x i32> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
- %v2f32_2 = shufflevector <2 x float> undef, <2 x float> undef, <2 x i32> <i32 1, i32 0>
- %v4f32_4 = shufflevector <4 x float> undef, <4 x float> undef, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
- %v2i64_2 = shufflevector <2 x i64> undef, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
- %v2f64_2 = shufflevector <2 x double> undef, <2 x double> undef, <2 x i32> <i32 1, i32 0>
+ %v2i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <2 x i32> <i32 1, i32 0>
+ %v2i8_2_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <2 x i32> <i32 1, i32 0>
+ %v2i8_4 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v2i8_4_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v4i8_4 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v4i8_4_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v2i8_8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v2i8_8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v4i8_8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v4i8_8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v6i8_8 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v6i8_8_2 = shufflevector <6 x i8> %i8v6, <6 x i8> %i8v6_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v8i8_8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v8i8_8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v16i8_16 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v16i8_16_2 = shufflevector <16 x i8> %i8v16, <16 x i8> %i8v16_2, <16 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v2i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <2 x i32> <i32 1, i32 0>
+ %v2i16_2_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <2 x i32> <i32 1, i32 0>
+ %v4i16_4 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v4i16_4_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v8i16_8 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v8i16_8_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 0, i32 1>
+ %v2i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <2 x i32> <i32 1, i32 0>
+ %v2i32_2_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <2 x i32> <i32 1, i32 0>
+ %v4i32_4 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v4i32_4_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v2f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <2 x i32> <i32 1, i32 0>
+ %v2f32_2_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <2 x i32> <i32 1, i32 0>
+ %v4f32_4 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v4f32_4_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <4 x i32> <i32 1, i32 3, i32 2, i32 0>
+ %v2i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <2 x i32> <i32 1, i32 0>
+ %v2i64_2_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x i32> <i32 1, i32 0>
+ %v2f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <2 x i32> <i32 1, i32 0>
+ %v2f64_2_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <2 x i32> <i32 1, i32 0>
ret void
}
-define void @concat() {
+define void @concat(<2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i8> %i8v8, <8 x i8> %i8v8_2, <2 x half> %halfv2, <2 x half> %halfv2_2, <4 x half> %halfv4, <4 x half> %halfv4_2, <8 x half> %halfv8, <8 x half> %halfv8_2, <2 x i16> %i16v2, <2 x i16> %i16v2_2, <4 x i16> %i16v4, <4 x i16> %i16v4_2, <8 x i16> %i16v8, <8 x i16> %i16v8_2, <2 x i32> %i32v2, <2 x i32> %i32v2_2, <4 x i32> %i32v4, <4 x i32> %i32v4_2, <2 x float> %floatv2, <2 x float> %floatv2_2, <4 x float> %floatv4, <4 x float> %floatv4_2, <2 x i64> %i64v2, <2 x i64> %i64v2_2, <2 x double> %doublev2, <2 x double> %doublev2_2) {
; ALL-LABEL: 'concat'
-; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8 = shufflevector <8 x i8> undef, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i16 = shufflevector <2 x i16> undef, <2 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i16 = shufflevector <4 x i16> undef, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v16i16 = shufflevector <8 x i16> undef, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32 = shufflevector <2 x i32> undef, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32 = shufflevector <4 x i32> undef, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f16 = shufflevector <2 x half> undef, <2 x half> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8f16 = shufflevector <4 x half> undef, <4 x half> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v16f16 = shufflevector <8 x half> undef, <8 x half> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32 = shufflevector <2 x float> undef, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32 = shufflevector <4 x float> undef, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64 = shufflevector <2 x double> undef, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i16 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i16 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i16 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f16 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f16 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16f16 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i16_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i16_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f16_2 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f16_2 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16f16_2 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; ALL-NEXT: Cost Model: Found an estimated cost of 10 for instruction: ret void
;
; ALL-SIZE-LABEL: 'concat'
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8 = shufflevector <8 x i8> undef, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4i16 = shufflevector <2 x i16> undef, <2 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8i16 = shufflevector <4 x i16> undef, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v16i16 = shufflevector <8 x i16> undef, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32 = shufflevector <2 x i32> undef, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32 = shufflevector <4 x i32> undef, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %v4f16 = shufflevector <2 x half> undef, <2 x half> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %v8f16 = shufflevector <4 x half> undef, <4 x half> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 15 for instruction: %v16f16 = shufflevector <8 x half> undef, <8 x half> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32 = shufflevector <2 x float> undef, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32 = shufflevector <4 x float> undef, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
-; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64 = shufflevector <2 x double> undef, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i16 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i16 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i16 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f16 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f16 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16f16 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %v4i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %v8i8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %v16i8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i16_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16i16_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8i32_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f16_2 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f16_2 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v16f16_2 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v8f32_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %v4f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; ALL-SIZE-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
;
- %v4i8 = shufflevector <2 x i8> undef, <2 x i8> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v8i8 = shufflevector <4 x i8> undef, <4 x i8> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %v16i8 = shufflevector <8 x i8> undef, <8 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %v4i16 = shufflevector <2 x i16> undef, <2 x i16> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v8i16 = shufflevector <4 x i16> undef, <4 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %v16i16 = shufflevector <8 x i16> undef, <8 x i16> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %v4i32 = shufflevector <2 x i32> undef, <2 x i32> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v8i32 = shufflevector <4 x i32> undef, <4 x i32> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %v4i64 = shufflevector <2 x i64> undef, <2 x i64> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v4f16 = shufflevector <2 x half> undef, <2 x half> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v8f16 = shufflevector <4 x half> undef, <4 x half> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %v16f16 = shufflevector <8 x half> undef, <8 x half> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
- %v4f32 = shufflevector <2 x float> undef, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
- %v8f32 = shufflevector <4 x float> undef, <4 x float> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
- %v4f64 = shufflevector <2 x double> undef, <2 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v4i8 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i8 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16i8 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4i16 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i16 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16i16 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4i32 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i32 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v4i64 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v4f16 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8f16 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16f16 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4f32 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8f32 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v4f64 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v4i8_2 = shufflevector <2 x i8> %i8v2, <2 x i8> %i8v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i8_2 = shufflevector <4 x i8> %i8v4, <4 x i8> %i8v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16i8_2 = shufflevector <8 x i8> %i8v8, <8 x i8> %i8v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4i16_2 = shufflevector <2 x i16> %i16v2, <2 x i16> %i16v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i16_2 = shufflevector <4 x i16> %i16v4, <4 x i16> %i16v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16i16_2 = shufflevector <8 x i16> %i16v8, <8 x i16> %i16v8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4i32_2 = shufflevector <2 x i32> %i32v2, <2 x i32> %i32v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8i32_2 = shufflevector <4 x i32> %i32v4, <4 x i32> %i32v4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v4i64_2 = shufflevector <2 x i64> %i64v2, <2 x i64> %i64v2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v4f16_2 = shufflevector <2 x half> %halfv2, <2 x half> %halfv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8f16_2 = shufflevector <4 x half> %halfv4, <4 x half> %halfv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v16f16_2 = shufflevector <8 x half> %halfv8, <8 x half> %halfv8_2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+ %v4f32_2 = shufflevector <2 x float> %floatv2, <2 x float> %floatv2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+ %v8f32_2 = shufflevector <4 x float> %floatv4, <4 x float> %floatv4_2, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+ %v4f64_2 = shufflevector <2 x double> %doublev2, <2 x double> %doublev2_2, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
ret void
}
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
index 8c436de4c3f6..f333bc3fa231 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/depend_diff_types.ll
@@ -117,16 +117,8 @@ exit:
; CHECK-LABEL: function 'neg_dist_dep_type_size_equivalence':
; CHECK-NEXT: loop:
; CHECK-NEXT: Report: unsafe dependent memory operations in loop.
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Backward loop carried data dependence that prevents store-to-load forwarding.
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %ld.f64 = load double, ptr %gep.iv, align 8 ->
-; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
-; CHECK-EMPTY:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %ld.i64 = load i64, ptr %gep.iv, align 8 ->
-; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
-; CHECK-EMPTY:
; CHECK-NEXT: BackwardVectorizableButPreventsForwarding:
; CHECK-NEXT: %ld.f64 = load double, ptr %gep.iv, align 8 ->
; CHECK-NEXT: store double %val, ptr %gep.iv.101.i64, align 8
@@ -137,7 +129,7 @@ exit:
; CHECK-EMPTY:
; CHECK-NEXT: Unknown:
; CHECK-NEXT: store double %val, ptr %gep.iv.101.i64, align 8 ->
-; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
+; CHECK-NEXT: store i32 %ld.i64.i32, ptr %gep.iv.n.i64, align 8
; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/invariant-dependence-before.ll b/llvm/test/Analysis/LoopAccessAnalysis/invariant-dependence-before.ll
index 2a210a5a445b..2139804753ef 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/invariant-dependence-before.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/invariant-dependence-before.ll
@@ -4,13 +4,8 @@
define void @test_invar_dependence_before_positive_strided_access_1(ptr %a) {
; CHECK-LABEL: 'test_invar_dependence_before_positive_strided_access_1'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %a, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %gep, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -39,13 +34,8 @@ exit:
define void @test_invar_dependence_before_positive_strided_access_2(ptr %a) {
; CHECK-LABEL: 'test_invar_dependence_before_positive_strided_access_2'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %a, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -144,13 +134,8 @@ exit:
define void @test_invar_dependence_before_positive_strided_access_1_different_access_sizes(ptr %a) {
; CHECK-LABEL: 'test_invar_dependence_before_positive_strided_access_1_different_access_sizes'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %a, align 4 ->
-; CHECK-NEXT: store i8 %t, ptr %gep, align 1
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -216,13 +201,8 @@ exit:
define void @test_invar_dependence_before_negative_strided_access_1(ptr %a) {
; CHECK-LABEL: 'test_invar_dependence_before_negative_strided_access_1'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %a, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %gep, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -251,13 +231,8 @@ exit:
define void @test_invar_dependence_before_negative_strided_access_2(ptr %a) {
; CHECK-LABEL: 'test_invar_dependence_before_negative_strided_access_2'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %a, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -357,13 +332,8 @@ exit:
define void @test_both_invar_before_1(ptr %a) {
; CHECK-LABEL: 'test_both_invar_before_1'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %a, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %gep.off, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -391,13 +361,8 @@ exit:
define void @test_both_invar_before_2(ptr %a) {
; CHECK-LABEL: 'test_both_invar_before_2'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep.off, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %a, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -754,3 +719,68 @@ loop:
exit:
ret void
}
+
+define void @test_invar_vector_dependence_before_positive_strided_access_1(ptr %a) {
+; CHECK-LABEL: 'test_invar_vector_dependence_before_positive_strided_access_1'
+; CHECK-NEXT: loop:
+; CHECK-NEXT: Memory dependences are safe
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ %gep.off = getelementptr i8, ptr %a, i32 4
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep = getelementptr i32, ptr %gep.off, i32 %iv
+ %l = load <4 x i8>, ptr %a
+ store i32 0, ptr %gep
+ %iv.next = add i32 %iv, 1
+ %ec = icmp eq i32 %iv.next, 100
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
+
+define void @test_invar_scalable_dependence_before_positive_strided_access_1(ptr %a) {
+; CHECK-LABEL: 'test_invar_scalable_dependence_before_positive_strided_access_1'
+; CHECK-NEXT: loop:
+; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
+; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Dependences:
+; CHECK-NEXT: Unknown:
+; CHECK-NEXT: %l = load <vscale x 4 x i8>, ptr %a, align 4 ->
+; CHECK-NEXT: store i32 0, ptr %gep, align 4
+; CHECK-EMPTY:
+; CHECK-NEXT: Run-time memory checks:
+; CHECK-NEXT: Grouped accesses:
+; CHECK-EMPTY:
+; CHECK-NEXT: Non vectorizable stores to invariant address were not found in loop.
+; CHECK-NEXT: SCEV assumptions:
+; CHECK-EMPTY:
+; CHECK-NEXT: Expressions re-written:
+;
+entry:
+ %gep.off = getelementptr i8, ptr %a, i32 4
+ br label %loop
+
+loop:
+ %iv = phi i32 [ 0, %entry ], [ %iv.next, %loop ]
+ %gep = getelementptr i32, ptr %gep.off, i32 %iv
+ %l = load <vscale x 4 x i8>, ptr %a
+ store i32 0, ptr %gep
+ %iv.next = add i32 %iv, 1
+ %ec = icmp eq i32 %iv.next, 100
+ br i1 %ec, label %exit, label %loop
+
+exit:
+ ret void
+}
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/loop-invariant-dep-with-backedge-taken-count.ll b/llvm/test/Analysis/LoopAccessAnalysis/loop-invariant-dep-with-backedge-taken-count.ll
index 02285031f628..723d01b38f45 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/loop-invariant-dep-with-backedge-taken-count.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/loop-invariant-dep-with-backedge-taken-count.ll
@@ -7,13 +7,8 @@
define void @test_distance_greater_than_BTC_100(ptr %a) {
; CHECK-LABEL: 'test_distance_greater_than_BTC_100'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep.x, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %gep, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
@@ -112,13 +107,8 @@ exit:
define void @test_distance_greater_than_BTC_10000(ptr %a) {
; CHECK-LABEL: 'test_distance_greater_than_BTC_10000'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep.x, align 4 ->
-; CHECK-NEXT: store i32 %l, ptr %gep, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
diff --git a/llvm/test/Analysis/LoopAccessAnalysis/non-constant-strides-backward.ll b/llvm/test/Analysis/LoopAccessAnalysis/non-constant-strides-backward.ll
index 416742a94e0d..845ff078ee0e 100644
--- a/llvm/test/Analysis/LoopAccessAnalysis/non-constant-strides-backward.ll
+++ b/llvm/test/Analysis/LoopAccessAnalysis/non-constant-strides-backward.ll
@@ -45,13 +45,8 @@ exit:
define void @different_non_constant_strides_known_backward_distance_larger_than_trip_count(ptr %A) {
; CHECK-LABEL: 'different_non_constant_strides_known_backward_distance_larger_than_trip_count'
; CHECK-NEXT: loop:
-; CHECK-NEXT: Report: unsafe dependent memory operations in loop. Use #pragma clang loop distribute(enable) to allow loop distribution to attempt to isolate the offending operations into a separate loop
-; CHECK-NEXT: Unknown data dependence.
+; CHECK-NEXT: Memory dependences are safe
; CHECK-NEXT: Dependences:
-; CHECK-NEXT: Unknown:
-; CHECK-NEXT: %l = load i32, ptr %gep, align 4 ->
-; CHECK-NEXT: store i32 %add, ptr %gep.mul.2, align 4
-; CHECK-EMPTY:
; CHECK-NEXT: Run-time memory checks:
; CHECK-NEXT: Grouped accesses:
; CHECK-EMPTY:
diff --git a/llvm/test/Assembler/ConstantExprFold.ll b/llvm/test/Assembler/ConstantExprFold.ll
index 4ce44d2e5513..ab7e767d767b 100644
--- a/llvm/test/Assembler/ConstantExprFold.ll
+++ b/llvm/test/Assembler/ConstantExprFold.ll
@@ -37,22 +37,22 @@
; Need a function to make update_test_checks.py work.
;.
-; CHECK: @[[A:[a-zA-Z0-9_$"\\.-]+]] = global i64 0
-; CHECK: @[[ADD:[a-zA-Z0-9_$"\\.-]+]] = global ptr @A
-; CHECK: @[[SUB:[a-zA-Z0-9_$"\\.-]+]] = global ptr @A
-; CHECK: @[[MUL:[a-zA-Z0-9_$"\\.-]+]] = global ptr null
-; CHECK: @[[XOR:[a-zA-Z0-9_$"\\.-]+]] = global ptr @A
-; CHECK: @[[B:[a-zA-Z0-9_$"\\.-]+]] = external global [[TY:%.*]]
-; CHECK: @[[ICMP_ULT1:[a-zA-Z0-9_$"\\.-]+]] = global i1 icmp ugt (ptr getelementptr inbounds (i64, ptr @A, i64 1), ptr @A)
-; CHECK: @[[ICMP_SLT:[a-zA-Z0-9_$"\\.-]+]] = global i1 false
-; CHECK: @[[ICMP_ULT2:[a-zA-Z0-9_$"\\.-]+]] = global i1 icmp ugt (ptr getelementptr inbounds ([[TY:%.*]], ptr @B, i64 0, i32 1), ptr @B)
-; CHECK: @[[CONS:[a-zA-Z0-9_$"\\.-]+]] = weak global i32 0, align 8
-; CHECK: @[[GEP1:[a-zA-Z0-9_$"\\.-]+]] = global <2 x ptr> undef
-; CHECK: @[[GEP2:[a-zA-Z0-9_$"\\.-]+]] = global <2 x ptr> undef
-; CHECK: @[[GEP3:[a-zA-Z0-9_$"\\.-]+]] = global <2 x ptr> zeroinitializer
-; CHECK: @[[GEP4:[a-zA-Z0-9_$"\\.-]+]] = global <2 x ptr> zeroinitializer
-; CHECK: @[[BITCAST1:[a-zA-Z0-9_$"\\.-]+]] = global <2 x i32> <i32 -1, i32 -1>
-; CHECK: @[[BITCAST2:[a-zA-Z0-9_$"\\.-]+]] = global <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>
+; CHECK: @A = global i64 0
+; CHECK: @add = global ptr @A
+; CHECK: @sub = global ptr @A
+; CHECK: @mul = global ptr null
+; CHECK: @xor = global ptr @A
+; CHECK: @B = external global %Ty
+; CHECK: @icmp_ult1 = global i1 icmp ugt (ptr getelementptr inbounds (i64, ptr @A, i64 1), ptr @A)
+; CHECK: @icmp_slt = global i1 false
+; CHECK: @icmp_ult2 = global i1 icmp ugt (ptr getelementptr inbounds (%Ty, ptr @B, i64 0, i32 1), ptr @B)
+; CHECK: @cons = weak global i32 0, align 8
+; CHECK: @gep1 = global <2 x ptr> undef
+; CHECK: @gep2 = global <2 x ptr> undef
+; CHECK: @gep3 = global <2 x ptr> zeroinitializer
+; CHECK: @gep4 = global <2 x ptr> zeroinitializer
+; CHECK: @bitcast1 = global <2 x i32> <i32 -1, i32 -1>
+; CHECK: @bitcast2 = global <4 x i16> <i16 -1, i16 -1, i16 -1, i16 -1>
;.
define void @dummy() {
; CHECK-LABEL: @dummy(
diff --git a/llvm/test/CodeGen/AArch64/exp10-libcall-names.ll b/llvm/test/CodeGen/AArch64/exp10-libcall-names.ll
new file mode 100644
index 000000000000..1220aec447ab
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/exp10-libcall-names.ll
@@ -0,0 +1,39 @@
+; RUN: llc -mtriple=aarch64-linux-gnu < %s | FileCheck -check-prefix=LINUX %s
+; RUN: llc -mtriple=aarch64-apple-macos10.9 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=aarch64-apple-ios7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=aarch64-apple-tvos7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=aarch64-apple-watchos7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=aarch64-apple-xros7.0 < %s | FileCheck -check-prefix=APPLE %s
+
+; RUN: not llc -mtriple=aarch64-apple-macos10.8 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=aarch64-apple-ios6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=aarch64-apple-tvos6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=aarch64-apple-xros6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+
+; Check exp10/exp10f is emitted as __exp10/__exp10f on assorted systems.
+
+; ERR: no libcall available for fexp10
+
+define float @test_exp10_f32(float %x) {
+; LINUX-LABEL: test_exp10_f32:
+; LINUX: // %bb.0:
+; LINUX-NEXT: b exp10f
+;
+; APPLE-LABEL: test_exp10_f32:
+; APPLE: ; %bb.0:
+; APPLE-NEXT: b ___exp10f
+ %ret = call float @llvm.exp10.f32(float %x)
+ ret float %ret
+}
+
+define double @test_exp10_f64(double %x) {
+; LINUX-LABEL: test_exp10_f64:
+; LINUX: // %bb.0:
+; LINUX-NEXT: b exp10
+;
+; APPLE-LABEL: test_exp10_f64:
+; APPLE: ; %bb.0:
+; APPLE-NEXT: b ___exp10
+ %ret = call double @llvm.exp10.f64(double %x)
+ ret double %ret
+}
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
index 7748b481cf5b..85cfb9b320f1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-ctlz-zero-undef.mir
@@ -82,8 +82,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
- ; CHECK-NEXT: [[AMDGPU_FFBH_U32:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[LSHR]](s32)
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[AMDGPU_FFBH_U32:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[SHL]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[AMDGPU_FFBH_U32]], [[C1]]
; CHECK-NEXT: $vgpr0 = COPY [[AND]](s32)
@@ -147,10 +147,10 @@ body: |
; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(s32) = G_BITCAST [[COPY]](<2 x s16>)
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 16
; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; CHECK-NEXT: [[LSHR1:%[0-9]+]]:_(s32) = G_LSHR [[BITCAST]], [[C]](s32)
- ; CHECK-NEXT: [[AMDGPU_FFBH_U32:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[LSHR1]](s32)
- ; CHECK-NEXT: [[LSHR2:%[0-9]+]]:_(s32) = G_LSHR [[LSHR]], [[C]](s32)
- ; CHECK-NEXT: [[AMDGPU_FFBH_U321:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[LSHR2]](s32)
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[BITCAST]], [[C]](s32)
+ ; CHECK-NEXT: [[AMDGPU_FFBH_U32:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[SHL]](s32)
+ ; CHECK-NEXT: [[SHL2:%[0-9]+]]:_(s32) = G_SHL [[LSHR]], [[C]](s32)
+ ; CHECK-NEXT: [[AMDGPU_FFBH_U321:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[SHL2]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 65535
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[AMDGPU_FFBH_U32]], [[C1]]
; CHECK-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[AMDGPU_FFBH_U321]], [[C1]]
@@ -175,8 +175,8 @@ body: |
; CHECK-NEXT: {{ $}}
; CHECK-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
; CHECK-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 25
- ; CHECK-NEXT: [[LSHR:%[0-9]+]]:_(s32) = G_LSHR [[COPY]], [[C]](s32)
- ; CHECK-NEXT: [[FFBH:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[LSHR]](s32)
+ ; CHECK-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[C]](s32)
+ ; CHECK-NEXT: [[FFBH:%[0-9]+]]:_(s32) = G_AMDGPU_FFBH_U32 [[SHL]](s32)
; CHECK-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 127
; CHECK-NEXT: [[AND:%[0-9]+]]:_(s32) = G_AND [[FFBH]], [[C1]]
; CHECK-NEXT: $vgpr0 = COPY [[AND]](s32)
diff --git a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
index 2e64a3456c24..7932f8d1fc5b 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgpu-simplify-libcall-rootn.ll
@@ -272,8 +272,8 @@ define half @test_rootn_f16_1(half %x) {
define half @test_rootn_f16_2(half %x) {
; CHECK-LABEL: define half @test_rootn_f16_2(
; CHECK-SAME: half [[X:%.*]]) {
-; CHECK-NEXT: [[__ROOTN2SQRT:%.*]] = call half @_Z4sqrtDh(half [[X]])
-; CHECK-NEXT: ret half [[__ROOTN2SQRT]]
+; CHECK-NEXT: [[CALL:%.*]] = call half @llvm.sqrt.f16(half [[X]]), !fpmath [[META0:![0-9]+]]
+; CHECK-NEXT: ret half [[CALL]]
;
%call = tail call half @_Z5rootnDhi(half %x, i32 2)
ret half %call
@@ -302,7 +302,8 @@ define half @test_rootn_f16_neg1(half %x) {
define half @test_rootn_f16_neg2(half %x) {
; CHECK-LABEL: define half @test_rootn_f16_neg2(
; CHECK-SAME: half [[X:%.*]]) {
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call half @_Z5rsqrtDh(half [[X]])
+; CHECK-NEXT: [[TMP1:%.*]] = call contract half @llvm.sqrt.f16(half [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv contract half 0xH3C00, [[TMP1]], !fpmath [[META0]]
; CHECK-NEXT: ret half [[__ROOTN2RSQRT]]
;
%call = tail call half @_Z5rootnDhi(half %x, i32 -2)
@@ -342,8 +343,7 @@ define <2 x half> @test_rootn_v2f16_0(<2 x half> %x) {
define <2 x half> @test_rootn_v2f16_1(<2 x half> %x) {
; CHECK-LABEL: define <2 x half> @test_rootn_v2f16_1(
; CHECK-SAME: <2 x half> [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> [[X]], <2 x i32> <i32 1, i32 1>)
-; CHECK-NEXT: ret <2 x half> [[CALL]]
+; CHECK-NEXT: ret <2 x half> [[X]]
;
%call = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> %x, <2 x i32> <i32 1, i32 1>)
ret <2 x half> %call
@@ -352,7 +352,7 @@ define <2 x half> @test_rootn_v2f16_1(<2 x half> %x) {
define <2 x half> @test_rootn_v2f16_2(<2 x half> %x) {
; CHECK-LABEL: define <2 x half> @test_rootn_v2f16_2(
; CHECK-SAME: <2 x half> [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> [[X]], <2 x i32> <i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <2 x half> @llvm.sqrt.v2f16(<2 x half> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <2 x half> [[CALL]]
;
%call = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> %x, <2 x i32> <i32 2, i32 2>)
@@ -362,8 +362,8 @@ define <2 x half> @test_rootn_v2f16_2(<2 x half> %x) {
define <2 x half> @test_rootn_v2f16_neg1(<2 x half> %x) {
; CHECK-LABEL: define <2 x half> @test_rootn_v2f16_neg1(
; CHECK-SAME: <2 x half> [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> [[X]], <2 x i32> <i32 -1, i32 -1>)
-; CHECK-NEXT: ret <2 x half> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <2 x half> <half 0xH3C00, half 0xH3C00>, [[X]]
+; CHECK-NEXT: ret <2 x half> [[__ROOTN2DIV]]
;
%call = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> %x, <2 x i32> <i32 -1, i32 -1>)
ret <2 x half> %call
@@ -372,8 +372,9 @@ define <2 x half> @test_rootn_v2f16_neg1(<2 x half> %x) {
define <2 x half> @test_rootn_v2f16_neg2(<2 x half> %x) {
; CHECK-LABEL: define <2 x half> @test_rootn_v2f16_neg2(
; CHECK-SAME: <2 x half> [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> [[X]], <2 x i32> <i32 -2, i32 -2>)
-; CHECK-NEXT: ret <2 x half> [[CALL]]
+; CHECK-NEXT: [[TMP1:%.*]] = call contract <2 x half> @llvm.sqrt.v2f16(<2 x half> [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv contract <2 x half> <half 0xH3C00, half 0xH3C00>, [[TMP1]], !fpmath [[META0]]
+; CHECK-NEXT: ret <2 x half> [[__ROOTN2RSQRT]]
;
%call = tail call <2 x half> @_Z5rootnDv2_DhDv2_i(<2 x half> %x, <2 x i32> <i32 -2, i32 -2>)
ret <2 x half> %call
@@ -512,7 +513,8 @@ define float @test_rootn_f32__y_1__strictfp(float %x) #1 {
; CHECK-LABEL: define float @test_rootn_f32__y_1__strictfp(
; CHECK-SAME: float [[X:%.*]]) #[[ATTR0:[0-9]+]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: ret float [[X]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 1) #[[ATTR0]]
+; CHECK-NEXT: ret float [[CALL]]
;
entry:
%call = tail call float @_Z5rootnfi(float %x, i32 1) #1
@@ -523,8 +525,7 @@ define <2 x float> @test_rootn_v2f32__y_1(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_1(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 1, i32 1>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: ret <2 x float> [[X]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 1, i32 1>)
@@ -547,8 +548,7 @@ define <2 x float> @test_rootn_v2f32__y_1_undef(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_1_undef(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 1, i32 poison>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: ret <2 x float> [[X]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 1, i32 poison>)
@@ -559,8 +559,7 @@ define <3 x float> @test_rootn_v3f32__y_1(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_1(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 1, i32 1, i32 1>)
-; CHECK-NEXT: ret <3 x float> [[CALL]]
+; CHECK-NEXT: ret <3 x float> [[X]]
;
entry:
%call = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> %x, <3 x i32> <i32 1, i32 1, i32 1>)
@@ -571,8 +570,7 @@ define <3 x float> @test_rootn_v3f32__y_1_undef(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_1_undef(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 1, i32 1, i32 poison>)
-; CHECK-NEXT: ret <3 x float> [[CALL]]
+; CHECK-NEXT: ret <3 x float> [[X]]
;
entry:
%call = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> %x, <3 x i32> <i32 1, i32 1, i32 poison>)
@@ -583,8 +581,7 @@ define <4 x float> @test_rootn_v4f32__y_1(<4 x float> %x) {
; CHECK-LABEL: define <4 x float> @test_rootn_v4f32__y_1(
; CHECK-SAME: <4 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <4 x float> @_Z5rootnDv4_fDv4_i(<4 x float> [[X]], <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
-; CHECK-NEXT: ret <4 x float> [[CALL]]
+; CHECK-NEXT: ret <4 x float> [[X]]
;
entry:
%call = tail call <4 x float> @_Z5rootnDv4_fDv4_i(<4 x float> %x, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
@@ -595,8 +592,7 @@ define <8 x float> @test_rootn_v8f32__y_1(<8 x float> %x) {
; CHECK-LABEL: define <8 x float> @test_rootn_v8f32__y_1(
; CHECK-SAME: <8 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <8 x float> @_Z5rootnDv8_fDv8_i(<8 x float> [[X]], <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
-; CHECK-NEXT: ret <8 x float> [[CALL]]
+; CHECK-NEXT: ret <8 x float> [[X]]
;
entry:
%call = tail call <8 x float> @_Z5rootnDv8_fDv8_i(<8 x float> %x, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
@@ -607,8 +603,7 @@ define <16 x float> @test_rootn_v16f32__y_1(<16 x float> %x) {
; CHECK-LABEL: define <16 x float> @test_rootn_v16f32__y_1(
; CHECK-SAME: <16 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <16 x float> @_Z5rootnDv16_fDv16_i(<16 x float> [[X]], <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
-; CHECK-NEXT: ret <16 x float> [[CALL]]
+; CHECK-NEXT: ret <16 x float> [[X]]
;
entry:
%call = tail call <16 x float> @_Z5rootnDv16_fDv16_i(<16 x float> %x, <16 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>)
@@ -619,8 +614,8 @@ define float @test_rootn_f32__y_2(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_2(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2SQRT:%.*]] = call float @_Z4sqrtf(float [[X]])
-; CHECK-NEXT: ret float [[__ROOTN2SQRT]]
+; CHECK-NEXT: [[CALL:%.*]] = call float @llvm.sqrt.f32(float [[X]]), !fpmath [[META0]]
+; CHECK-NEXT: ret float [[CALL]]
;
entry:
%call = tail call float @_Z5rootnfi(float %x, i32 2)
@@ -631,8 +626,8 @@ define float @test_rootn_f32__y_2_flags(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_2_flags(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2SQRT:%.*]] = call nnan nsz float @_Z4sqrtf(float [[X]])
-; CHECK-NEXT: ret float [[__ROOTN2SQRT]]
+; CHECK-NEXT: [[CALL:%.*]] = call nnan nsz float @llvm.sqrt.f32(float [[X]]), !fpmath [[META0]]
+; CHECK-NEXT: ret float [[CALL]]
;
entry:
%call = tail call nnan nsz float @_Z5rootnfi(float %x, i32 2)
@@ -644,8 +639,8 @@ define float @test_rootn_f32__y_2_fpmath_3(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_2_fpmath_3(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2SQRT:%.*]] = call nnan nsz float @_Z4sqrtf(float [[X]])
-; CHECK-NEXT: ret float [[__ROOTN2SQRT]]
+; CHECK-NEXT: [[CALL:%.*]] = call nnan nsz float @llvm.sqrt.f32(float [[X]]), !fpmath [[META1:![0-9]+]]
+; CHECK-NEXT: ret float [[CALL]]
;
entry:
%call = tail call nnan nsz float @_Z5rootnfi(float %x, i32 2), !fpmath !0
@@ -656,7 +651,7 @@ define <2 x float> @test_rootn_v2f32__y_2_flags(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_2_flags(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call nnan nsz <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call nnan nsz <2 x float> @llvm.sqrt.v2f32(<2 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <2 x float> [[CALL]]
;
entry:
@@ -668,7 +663,7 @@ define <3 x float> @test_rootn_v3f32__y_2(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_2(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 2, i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <3 x float> @llvm.sqrt.v3f32(<3 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <3 x float> [[CALL]]
;
entry:
@@ -680,7 +675,7 @@ define <3 x float> @test_rootn_v3f32__y_2_undef(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_2_undef(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 2, i32 poison, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <3 x float> @llvm.sqrt.v3f32(<3 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <3 x float> [[CALL]]
;
entry:
@@ -692,7 +687,7 @@ define <4 x float> @test_rootn_v4f32__y_2(<4 x float> %x) {
; CHECK-LABEL: define <4 x float> @test_rootn_v4f32__y_2(
; CHECK-SAME: <4 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <4 x float> @_Z5rootnDv4_fDv4_i(<4 x float> [[X]], <4 x i32> <i32 2, i32 2, i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <4 x float> @llvm.sqrt.v4f32(<4 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <4 x float> [[CALL]]
;
entry:
@@ -704,7 +699,7 @@ define <8 x float> @test_rootn_v8f32__y_2(<8 x float> %x) {
; CHECK-LABEL: define <8 x float> @test_rootn_v8f32__y_2(
; CHECK-SAME: <8 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <8 x float> @_Z5rootnDv8_fDv8_i(<8 x float> [[X]], <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <8 x float> @llvm.sqrt.v8f32(<8 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <8 x float> [[CALL]]
;
entry:
@@ -716,7 +711,7 @@ define <16 x float> @test_rootn_v16f32__y_2(<16 x float> %x) {
; CHECK-LABEL: define <16 x float> @test_rootn_v16f32__y_2(
; CHECK-SAME: <16 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <16 x float> @_Z5rootnDv16_fDv16_i(<16 x float> [[X]], <16 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>)
+; CHECK-NEXT: [[CALL:%.*]] = call <16 x float> @llvm.sqrt.v16f32(<16 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <16 x float> [[CALL]]
;
entry:
@@ -740,8 +735,8 @@ define <2 x float> @test_rootn_v2f32__y_3(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_3(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 3, i32 3>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2CBRT:%.*]] = call <2 x float> @_Z4cbrtDv2_f(<2 x float> [[X]])
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2CBRT]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 3, i32 3>)
@@ -764,7 +759,7 @@ define <2 x float> @test_rootn_v2f32__y_nonsplat_2_poison(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_nonsplat_2_poison(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 2, i32 poison>)
+; CHECK-NEXT: [[CALL:%.*]] = call <2 x float> @llvm.sqrt.v2f32(<2 x float> [[X]]), !fpmath [[META0]]
; CHECK-NEXT: ret <2 x float> [[CALL]]
;
entry:
@@ -800,8 +795,8 @@ define <2 x float> @test_rootn_v2f32__y_neg1(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_neg1(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 -1, i32 -1>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <2 x float> <float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 -1, i32 -1>)
@@ -812,8 +807,8 @@ define <3 x float> @test_rootn_v3f32__y_neg1(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_neg1(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 -1, i32 -1, i32 -1>)
-; CHECK-NEXT: ret <3 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <3 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <3 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> %x, <3 x i32> <i32 -1, i32 -1, i32 -1>)
@@ -824,8 +819,8 @@ define <3 x float> @test_rootn_v3f32__y_neg1_undef(<3 x float> %x) {
; CHECK-LABEL: define <3 x float> @test_rootn_v3f32__y_neg1_undef(
; CHECK-SAME: <3 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> [[X]], <3 x i32> <i32 -1, i32 -1, i32 poison>)
-; CHECK-NEXT: ret <3 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <3 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <3 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <3 x float> @_Z5rootnDv3_fDv3_i(<3 x float> %x, <3 x i32> <i32 -1, i32 -1, i32 poison>)
@@ -836,8 +831,8 @@ define <4 x float> @test_rootn_v4f32__y_neg1(<4 x float> %x) {
; CHECK-LABEL: define <4 x float> @test_rootn_v4f32__y_neg1(
; CHECK-SAME: <4 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <4 x float> @_Z5rootnDv4_fDv4_i(<4 x float> [[X]], <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
-; CHECK-NEXT: ret <4 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <4 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <4 x float> @_Z5rootnDv4_fDv4_i(<4 x float> %x, <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>)
@@ -848,8 +843,8 @@ define <8 x float> @test_rootn_v8f32__y_neg1(<8 x float> %x) {
; CHECK-LABEL: define <8 x float> @test_rootn_v8f32__y_neg1(
; CHECK-SAME: <8 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <8 x float> @_Z5rootnDv8_fDv8_i(<8 x float> [[X]], <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>)
-; CHECK-NEXT: ret <8 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <8 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <8 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <8 x float> @_Z5rootnDv8_fDv8_i(<8 x float> %x, <8 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>)
@@ -860,8 +855,8 @@ define <16 x float> @test_rootn_v16f32__y_neg1(<16 x float> %x) {
; CHECK-LABEL: define <16 x float> @test_rootn_v16f32__y_neg1(
; CHECK-SAME: <16 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <16 x float> @_Z5rootnDv16_fDv16_i(<16 x float> [[X]], <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>)
-; CHECK-NEXT: ret <16 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2DIV:%.*]] = fdiv <16 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>, [[X]]
+; CHECK-NEXT: ret <16 x float> [[__ROOTN2DIV]]
;
entry:
%call = tail call <16 x float> @_Z5rootnDv16_fDv16_i(<16 x float> %x, <16 x i32> <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>)
@@ -872,7 +867,8 @@ define float @test_rootn_f32__y_neg2(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]])
+; CHECK-NEXT: [[TMP0:%.*]] = call contract float @llvm.sqrt.f32(float [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv contract float 1.000000e+00, [[TMP0]], !fpmath [[META0]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
@@ -884,7 +880,8 @@ define float @test_rootn_f32__y_neg2__flags(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__flags(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call nnan nsz float @_Z5rsqrtf(float [[X]])
+; CHECK-NEXT: [[TMP0:%.*]] = call nnan nsz contract float @llvm.sqrt.f32(float [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv nnan nsz contract float 1.000000e+00, [[TMP0]], !fpmath [[META0]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
@@ -896,7 +893,7 @@ define float @test_rootn_f32__y_neg2__strictfp(float %x) #1 {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__strictfp(
; CHECK-SAME: float [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]]) #[[ATTR0]]
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR0]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
@@ -908,7 +905,7 @@ define float @test_rootn_f32__y_neg2__noinline(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__noinline(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = call float @_Z5rsqrtf(float [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR3:[0-9]+]]
; CHECK-NEXT: ret float [[__ROOTN2RSQRT]]
;
entry:
@@ -920,7 +917,7 @@ define float @test_rootn_f32__y_neg2__nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2__nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR2:[0-9]+]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR4:[0-9]+]]
; CHECK-NEXT: ret float [[CALL]]
;
entry:
@@ -932,8 +929,9 @@ define <2 x float> @test_rootn_v2f32__y_neg2(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_neg2(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 -2, i32 -2>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[TMP0:%.*]] = call contract <2 x float> @llvm.sqrt.v2f32(<2 x float> [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv contract <2 x float> <float 1.000000e+00, float 1.000000e+00>, [[TMP0]], !fpmath [[META0]]
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2RSQRT]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 -2, i32 -2>)
@@ -944,8 +942,9 @@ define <2 x float> @test_rootn_v2f32__y_neg2__flags(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_neg2__flags(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call nnan nsz <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 -2, i32 -2>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[TMP0:%.*]] = call nnan nsz contract <2 x float> @llvm.sqrt.v2f32(<2 x float> [[X]])
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = fdiv nnan nsz contract <2 x float> <float 1.000000e+00, float 1.000000e+00>, [[TMP0]], !fpmath [[META0]]
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2RSQRT]]
;
entry:
%call = tail call nsz nnan <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 -2, i32 -2>)
@@ -956,8 +955,8 @@ define <2 x float> @test_rootn_v2f32__y_neg2__strictfp(<2 x float> %x) #1 {
; CHECK-LABEL: define <2 x float> @test_rootn_v2f32__y_neg2__strictfp(
; CHECK-SAME: <2 x float> [[X:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 -2, i32 -2>) #[[ATTR0]]
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2RSQRT:%.*]] = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 -2, i32 -2>) #[[ATTR0]]
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2RSQRT]]
;
entry:
%call = tail call <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 -2, i32 -2>) #1
@@ -1132,7 +1131,7 @@ define float @test_rootn_fast_f32_nobuiltin(float %x, i32 %y) {
; CHECK-LABEL: define float @test_rootn_fast_f32_nobuiltin(
; CHECK-SAME: float [[X:%.*]], i32 [[Y:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call fast float @_Z5rootnfi(float [[X]], i32 [[Y]]) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call fast float @_Z5rootnfi(float [[X]], i32 [[Y]]) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
entry:
@@ -1266,8 +1265,8 @@ define <2 x float> @test_rootn_afn_nnan_ninf_v2f32__y_3(<2 x float> %x) {
; CHECK-LABEL: define <2 x float> @test_rootn_afn_nnan_ninf_v2f32__y_3(
; CHECK-SAME: <2 x float> [[X:%.*]]) {
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[CALL:%.*]] = tail call nnan ninf afn <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> [[X]], <2 x i32> <i32 3, i32 3>)
-; CHECK-NEXT: ret <2 x float> [[CALL]]
+; CHECK-NEXT: [[__ROOTN2CBRT:%.*]] = call nnan ninf afn <2 x float> @_Z4cbrtDv2_f(<2 x float> [[X]])
+; CHECK-NEXT: ret <2 x float> [[__ROOTN2CBRT]]
;
entry:
%call = tail call afn nnan ninf <2 x float> @_Z5rootnDv2_fDv2_i(<2 x float> %x, <2 x i32> <i32 3, i32 3>)
@@ -1427,7 +1426,7 @@ entry:
define float @test_rootn_f32__y_0_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_0_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 0) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 0) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 0) #0
@@ -1437,7 +1436,7 @@ define float @test_rootn_f32__y_0_nobuiltin(float %x) {
define float @test_rootn_f32__y_1_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_1_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 1) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 1) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 1) #0
@@ -1447,7 +1446,7 @@ define float @test_rootn_f32__y_1_nobuiltin(float %x) {
define float @test_rootn_f32__y_2_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_2_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 2) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 2) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 2) #0
@@ -1457,7 +1456,7 @@ define float @test_rootn_f32__y_2_nobuiltin(float %x) {
define float @test_rootn_f32__y_3_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_3_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 3) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 3) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 3) #0
@@ -1467,7 +1466,7 @@ define float @test_rootn_f32__y_3_nobuiltin(float %x) {
define float @test_rootn_f32__y_neg1_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg1_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -1) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -1) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 -1) #0
@@ -1477,7 +1476,7 @@ define float @test_rootn_f32__y_neg1_nobuiltin(float %x) {
define float @test_rootn_f32__y_neg2_nobuiltin(float %x) {
; CHECK-LABEL: define float @test_rootn_f32__y_neg2_nobuiltin(
; CHECK-SAME: float [[X:%.*]]) {
-; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR2]]
+; CHECK-NEXT: [[CALL:%.*]] = tail call float @_Z5rootnfi(float [[X]], i32 -2) #[[ATTR4]]
; CHECK-NEXT: ret float [[CALL]]
;
%call = tail call float @_Z5rootnfi(float %x, i32 -2) #0
@@ -1492,6 +1491,11 @@ attributes #2 = { noinline }
!0 = !{float 3.0}
;.
; CHECK: attributes #[[ATTR0]] = { strictfp }
-; CHECK: attributes #[[ATTR1:[0-9]+]] = { nounwind memory(read) }
-; CHECK: attributes #[[ATTR2]] = { nobuiltin }
+; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind speculatable willreturn memory(none) }
+; CHECK: attributes #[[ATTR2:[0-9]+]] = { nounwind memory(read) }
+; CHECK: attributes #[[ATTR3]] = { noinline }
+; CHECK: attributes #[[ATTR4]] = { nobuiltin }
+;.
+; CHECK: [[META0]] = !{float 2.000000e+00}
+; CHECK: [[META1]] = !{float 3.000000e+00}
;.
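The updated CHECK lines above encode the constant-power rootn folds this test exercises: y = 1 returns x directly (except under strictfp, where the libcall is kept), y = 2 becomes llvm.sqrt with an !fpmath annotation, y = 3 becomes a cbrt libcall, y = -1 becomes an fdiv of 1.0 by x, and y = -2 becomes 1.0 divided by a contract sqrt. A minimal C sketch of those scalar folds, using plain libm calls rather than the OpenCL builtins the test targets (the names here are illustrative only):

#include <math.h>

/* Sketch only: mirrors the rootn(x, n) special cases the CHECK lines show. */
float rootn_fold(float x, int n) {
    switch (n) {
    case 1:  return x;                 /* rootn(x, 1)  -> x         */
    case 2:  return sqrtf(x);          /* rootn(x, 2)  -> sqrt(x)   */
    case 3:  return cbrtf(x);          /* rootn(x, 3)  -> cbrt(x)   */
    case -1: return 1.0f / x;          /* rootn(x, -1) -> 1 / x     */
    case -2: return 1.0f / sqrtf(x);   /* rootn(x, -2) -> 1/sqrt(x) */
    default: return powf(x, 1.0f / (float)n); /* general case: x^(1/n); the pass keeps the libcall */
    }
}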
diff --git a/llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll b/llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll
new file mode 100644
index 000000000000..7ad24302d783
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/call-defs-mode-register.ll
@@ -0,0 +1,57 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -simplify-mir -stop-after=finalize-isel < %s | FileCheck %s
+
+; Check that calls / inline asm get an implicit-def $mode operand added to
+; them in strictfp functions.
+
+declare protected void @maybe_defs_mode() #0
+
+define float @call_changes_mode(float %x, float %y) #0 {
+ ; CHECK-LABEL: name: call_changes_mode
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: ADJCALLSTACKUP 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+ ; CHECK-NEXT: [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @maybe_defs_mode, target-flags(amdgpu-rel32-hi) @maybe_defs_mode, implicit-def dead $scc
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:sgpr_128 = COPY $sgpr0_sgpr1_sgpr2_sgpr3
+ ; CHECK-NEXT: $sgpr0_sgpr1_sgpr2_sgpr3 = COPY [[COPY2]]
+ ; CHECK-NEXT: $sgpr30_sgpr31 = SI_CALL killed [[SI_PC_ADD_REL_OFFSET]], @maybe_defs_mode, csr_amdgpu, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit-def $mode
+ ; CHECK-NEXT: ADJCALLSTACKDOWN 0, 0, implicit-def dead $scc, implicit-def $sgpr32, implicit $sgpr32
+ ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_F32_e64_]]
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0
+ call void @maybe_defs_mode()
+ %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret float %val
+}
+
+define void @tail_call_changes_mode() #0 {
+ ; CHECK-LABEL: name: tail_call_changes_mode
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: [[SI_PC_ADD_REL_OFFSET:%[0-9]+]]:ccr_sgpr_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @maybe_defs_mode, target-flags(amdgpu-rel32-hi) @maybe_defs_mode, implicit-def dead $scc
+ ; CHECK-NEXT: SI_TCRETURN killed [[SI_PC_ADD_REL_OFFSET]], @maybe_defs_mode, 0, csr_amdgpu, implicit-def $mode
+ tail call void @maybe_defs_mode()
+ ret void
+}
+
+define float @asm_changes_mode(float %x, float %y) #0 {
+ ; CHECK-LABEL: name: asm_changes_mode
+ ; CHECK: bb.0 (%ir-block.0):
+ ; CHECK-NEXT: liveins: $vgpr0, $vgpr1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr1
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr0
+ ; CHECK-NEXT: INLINEASM &"; maybe defs mode", 1 /* sideeffect attdialect */
+ ; CHECK-NEXT: [[V_ADD_F32_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_ADD_F32_e64 0, [[COPY1]], 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
+ ; CHECK-NEXT: $vgpr0 = COPY [[V_ADD_F32_e64_]]
+ ; CHECK-NEXT: SI_RETURN implicit $vgpr0
+ call void asm sideeffect "; maybe defs mode", ""()
+ %val = call float @llvm.experimental.constrained.fadd.f32(float %x, float %y, metadata !"round.dynamic", metadata !"fpexcept.ignore")
+ ret float %val
+}
+
+declare float @llvm.experimental.constrained.fadd.f32(float, float, metadata, metadata)
+
+attributes #0 = { strictfp "amdgpu-no-completion-action" "amdgpu-no-default-queue" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-heap-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-lds-kernel-id" "amdgpu-no-multigrid-sync-arg" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" }
diff --git a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
index d94a27e8c020..756b81909968 100644
--- a/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctlz_zero_undef.ll
@@ -377,7 +377,7 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; GFX9-GISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-GISEL-NEXT: s_lshr_b32 s0, s4, 24
+; GFX9-GISEL-NEXT: s_lshl_b32 s0, s4, 24
; GFX9-GISEL-NEXT: s_flbit_i32_b32 s0, s0
; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX9-GISEL-NEXT: global_store_byte v1, v0, s[2:3]
@@ -452,7 +452,7 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; GFX9-GISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-GISEL-NEXT: s_lshr_b32 s0, s4, 16
+; GFX9-GISEL-NEXT: s_lshl_b32 s0, s4, 16
; GFX9-GISEL-NEXT: s_flbit_i32_b32 s0, s0
; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, s0
; GFX9-GISEL-NEXT: global_store_short v1, v0, s[2:3]
@@ -655,7 +655,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_with_select(ptr addrspace(1) noa
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-GISEL-NEXT: global_load_ubyte v1, v0, s[2:3]
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v2, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v2, 24, v1
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v2
; GFX9-GISEL-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX9-GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, 32, v2, vcc
@@ -760,7 +761,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i16_with_select(ptr addrspace(1) no
; GFX9-GISEL-NEXT: global_load_ubyte v2, v0, s[2:3] offset:1
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
; GFX9-GISEL-NEXT: v_lshl_or_b32 v1, v2, 8, v1
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v2, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v1
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v2
; GFX9-GISEL-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX9-GISEL-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; GFX9-GISEL-NEXT: v_cndmask_b32_e32 v1, 32, v2, vcc
@@ -1167,7 +1169,8 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8(ptr addrspace(1) noalias %out, p
; GFX9-GISEL-NEXT: global_load_ubyte v0, v[0:1], off
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v0, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
; GFX9-GISEL-NEXT: global_store_byte v1, v0, s[0:1]
; GFX9-GISEL-NEXT: s_endpgm
%tid = call i32 @llvm.amdgcn.workitem.id.x()
@@ -1705,8 +1708,9 @@ define amdgpu_kernel void @v_ctlz_zero_undef_i8_sel_eq_neg1(ptr addrspace(1) noa
; GFX9-GISEL-NEXT: global_load_ubyte v0, v[0:1], off
; GFX9-GISEL-NEXT: v_mov_b32_e32 v1, 0
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v2, v0
-; GFX9-GISEL-NEXT: v_cmp_eq_u32_sdwa s[2:3], v0, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v2, 24, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v2
+; GFX9-GISEL-NEXT: v_cmp_eq_u32_sdwa s[2:3], v0, v1 src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-GISEL-NEXT: v_cndmask_b32_e64 v0, v2, -1, s[2:3]
; GFX9-GISEL-NEXT: global_store_byte v1, v0, s[0:1]
; GFX9-GISEL-NEXT: s_endpgm
@@ -2186,7 +2190,7 @@ define i7 @v_ctlz_zero_undef_i7(i7 %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_i7:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v0, 25, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 25, v0
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call i7 @llvm.ctlz.i7(i7 %val, i1 true)
@@ -2278,7 +2282,7 @@ define amdgpu_kernel void @s_ctlz_zero_undef_i18(ptr addrspace(1) noalias %out,
; GFX9-GISEL-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; GFX9-GISEL-NEXT: v_mov_b32_e32 v0, 0
; GFX9-GISEL-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-GISEL-NEXT: s_lshr_b32 s0, s4, 14
+; GFX9-GISEL-NEXT: s_lshl_b32 s0, s4, 14
; GFX9-GISEL-NEXT: s_flbit_i32_b32 s0, s0
; GFX9-GISEL-NEXT: s_and_b32 s0, s0, 0x3ffff
; GFX9-GISEL-NEXT: s_lshr_b32 s1, s0, 16
@@ -2317,7 +2321,7 @@ define i18 @v_ctlz_zero_undef_i18(i18 %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_i18:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v0, 14, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 14, v0
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call i18 @llvm.ctlz.i18(i18 %val, i1 true)
@@ -2355,8 +2359,8 @@ define <2 x i18> @v_ctlz_zero_undef_v2i18(<2 x i18> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v2i18:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v0, 14, v0
-; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 14, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 14, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 14, v1
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
@@ -2394,10 +2398,13 @@ define <2 x i16> @v_ctlz_zero_undef_v2i16(<2 x i16> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v2i16:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-GISEL-NEXT: s_flbit_i32_b32 s4, 0
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 16, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, s4, 16, v0
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call <2 x i16> @llvm.ctlz.v2i16(<2 x i16> %val, i1 true)
ret <2 x i16> %ctlz
@@ -2439,11 +2446,15 @@ define <3 x i16> @v_ctlz_zero_undef_v3i16(<3 x i16> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v3i16:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-GISEL-NEXT: s_flbit_i32_b32 s4, 0
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v2
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, s4, 16, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v2, 16, v0
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call <3 x i16> @llvm.ctlz.v3i16(<3 x i16> %val, i1 true)
ret <3 x i16> %ctlz
@@ -2492,13 +2503,20 @@ define <4 x i16> @v_ctlz_zero_undef_v4i16(<4 x i16> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v4i16:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
-; GFX9-GISEL-NEXT: s_flbit_i32_b32 s4, 0
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v2, 16, v0
+; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v3, 16, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v2, v2
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v3, v3
; GFX9-GISEL-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-GISEL-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, s4, 16, v0
-; GFX9-GISEL-NEXT: v_lshl_or_b32 v1, s4, 16, v1
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v0, v2, 16, v0
+; GFX9-GISEL-NEXT: v_lshl_or_b32 v1, v3, 16, v1
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call <4 x i16> @llvm.ctlz.v4i16(<4 x i16> %val, i1 true)
ret <4 x i16> %ctlz
@@ -2536,8 +2554,10 @@ define <2 x i8> @v_ctlz_zero_undef_v2i8(<2 x i8> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v2i8:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v0, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
-; GFX9-GISEL-NEXT: v_ffbh_u32_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_3
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 24, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
+; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
%ctlz = call <2 x i8> @llvm.ctlz.v2i8(<2 x i8> %val, i1 true)
ret <2 x i8> %ctlz
@@ -2579,8 +2599,8 @@ define <2 x i7> @v_ctlz_zero_undef_v2i7(<2 x i7> %val) {
; GFX9-GISEL-LABEL: v_ctlz_zero_undef_v2i7:
; GFX9-GISEL: ; %bb.0:
; GFX9-GISEL-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v0, 25, v0
-; GFX9-GISEL-NEXT: v_lshrrev_b32_e32 v1, 25, v1
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v0, 25, v0
+; GFX9-GISEL-NEXT: v_lshlrev_b32_e32 v1, 25, v1
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v0, v0
; GFX9-GISEL-NEXT: v_ffbh_u32_e32 v1, v1
; GFX9-GISEL-NEXT: s_setpc_b64 s[30:31]
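The GFX9-GISEL changes above replace a right shift (or an sdwa operand select) with a left shift before the 32-bit count-leading-zeros instruction: for ctlz_zero_undef on an i8/i16/i18 value, the value is first shifted into the most significant bits so the 32-bit clz result equals the narrow-width clz. A small C sketch of the identity the lowering relies on (illustrative only; __builtin_clz is the GCC/Clang builtin, undefined for a zero input, which matches the zero_undef semantics):

#include <assert.h>

/* clz of a nonzero 8-bit value, computed with a 32-bit clz by placing the
 * byte in bits [31:24] first -- the same trick the updated lowering uses. */
static unsigned clz8_via_clz32(unsigned char x) {
    return (unsigned)__builtin_clz((unsigned)x << 24); /* requires x != 0 */
}

int main(void) {
    assert(clz8_via_clz32(0x01) == 7);
    assert(clz8_via_clz32(0x80) == 0);
    return 0;
}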
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
index b71728096093..03434caee233 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomic_optimizer_fp_rtn.ll
@@ -144,8 +144,8 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_scope_agent_sco
ret float %result
}
-define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7:[0-9]+]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
; IR-ITERATIVE: 2:
@@ -177,7 +177,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
; IR-ITERATIVE-NEXT: [[TMP25:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP23]], [[TMP16]] ]
; IR-ITERATIVE-NEXT: ret float [[TMP25]]
;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8:[0-9]+]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP24:%.*]]
; IR-DPP: 2:
@@ -213,8 +213,8 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_uni_value_one_as_scope_un
ret float %result
}
-define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]]
; IR-ITERATIVE: 2:
@@ -262,7 +262,7 @@ define amdgpu_ps float @global_atomic_fadd_uni_address_div_value_one_as_scope_un
; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]]
;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]]
; IR-DPP: 2:
@@ -618,8 +618,8 @@ define amdgpu_ps float @global_atomic_fmin_uni_address_div_value_agent_scope_uns
ret float %result
}
-define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP20:%.*]]
; IR-ITERATIVE: 2:
@@ -647,7 +647,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_uns
; IR-ITERATIVE-NEXT: [[TMP21:%.*]] = phi float [ poison, [[TMP0:%.*]] ], [ [[TMP19]], [[TMP12]] ]
; IR-ITERATIVE-NEXT: ret float [[TMP21]]
;
-; IR-DPP-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP20:%.*]]
; IR-DPP: 2:
@@ -679,8 +679,8 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_uni_value_agent_scope_uns
ret float %result
}
-define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP18:%.*]]
; IR-ITERATIVE: 2:
@@ -728,7 +728,7 @@ define amdgpu_ps float @global_atomic_fmax_uni_address_div_value_agent_scope_uns
; IR-ITERATIVE-NEXT: [[TMP34:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP34]], label [[TMP10]], label [[TMP12]]
;
-; IR-DPP-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP40:%.*]]
; IR-DPP: 2:
@@ -968,8 +968,8 @@ define amdgpu_ps float @global_atomic_fadd_div_address_div_value_agent_scope_uns
ret float %result
}
-define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
-; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
; IR-NEXT: ret float [[RESULT]]
;
@@ -977,8 +977,8 @@ define amdgpu_ps float @global_atomic_fadd_div_address_uni_value_one_as_scope_un
ret float %result
}
-define amdgpu_ps float @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1 {
-; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float %val) #1 {
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
; IR-NEXT: ret float [[RESULT]]
;
@@ -1022,8 +1022,8 @@ define amdgpu_ps float @global_atomic_fmin_div_address_div_value_agent_scope(ptr
ret float %result
}
-define amdgpu_ps float @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1{
-; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float inreg %val) #1{
+; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
; IR-NEXT: ret float [[RESULT]]
;
@@ -1031,8 +1031,8 @@ define amdgpu_ps float @global_atomic_fmax_div_address_uni_value_agent_scope_uns
ret float %result
}
-define amdgpu_ps float @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1{
-; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps float @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float %val) #1{
+; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
; IR-NEXT: ret float [[RESULT]]
;
@@ -1110,8 +1110,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_scope_a
ret double %result
}
-define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
; IR-ITERATIVE: 2:
@@ -1149,7 +1149,7 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_
; IR-ITERATIVE-NEXT: [[TMP31:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP29]], [[TMP16]] ]
; IR-ITERATIVE-NEXT: ret double [[TMP31]]
;
-; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP30:%.*]]
; IR-DPP: 2:
@@ -1191,8 +1191,8 @@ define amdgpu_ps double @global_atomic_fadd_double_uni_address_uni_value_one_as_
ret double %result
}
-define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
@@ -1338,8 +1338,8 @@ define amdgpu_ps double @global_atomic_fmin_double_uni_address_div_value_agent_s
ret double %result
}
-define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
; IR-ITERATIVE: 2:
@@ -1373,7 +1373,7 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_
; IR-ITERATIVE-NEXT: [[TMP27:%.*]] = phi double [ poison, [[TMP0:%.*]] ], [ [[TMP25]], [[TMP12]] ]
; IR-ITERATIVE-NEXT: ret double [[TMP27]]
;
-; IR-DPP-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic__fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP26:%.*]]
; IR-DPP: 2:
@@ -1411,8 +1411,8 @@ define amdgpu_ps double @global_atomic__fmax_double_uni_address_uni_value_agent_
ret double %result
}
-define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1{
-; IR-LABEL: @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
@@ -1528,8 +1528,8 @@ define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_agent_s
ret double %result
}
-define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
@@ -1537,8 +1537,8 @@ define amdgpu_ps double @global_atomic_fadd_double_div_address_uni_value_one_as_
ret double %result
}
-define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
@@ -1582,8 +1582,8 @@ define amdgpu_ps double @global_atomic_fmin_double_div_address_div_value_agent_s
ret double %result
}
-define amdgpu_ps double @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1{
-; IR-LABEL: @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double inreg %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_div_address_uni_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
@@ -1591,8 +1591,8 @@ define amdgpu_ps double @global_atomic__fmax_double_div_address_uni_value_agent_
ret double %result
}
-define amdgpu_ps double @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1{
-; IR-LABEL: @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps double @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double %val) #1{
+; IR-LABEL: @global_atomic__fmax_double_div_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret double [[RESULT]]
;
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
index b9234f47df19..239fe274d523 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_optimizer_fp_no_rtn.ll
@@ -117,8 +117,8 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_scope_agent_scop
ret void
}
-define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7:[0-9]+]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
@@ -142,7 +142,7 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_uns
; IR-ITERATIVE: 17:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8:[0-9]+]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-DPP: 2:
@@ -170,8 +170,8 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_uni_value_one_as_scope_uns
ret void
}
-define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
@@ -208,7 +208,7 @@ define amdgpu_ps void @global_atomic_fadd_uni_address_div_value_one_as_scope_uns
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP24]], label [[TMP10:%.*]], label [[TMP12]]
;
-; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP33:%.*]]
; IR-DPP: 2:
@@ -494,8 +494,8 @@ define amdgpu_ps void @global_atomic_fmin_uni_address_div_value_agent_scope_unsa
ret void
}
-define amdgpu_ps void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
@@ -515,7 +515,7 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsa
; IR-ITERATIVE: 13:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fmax_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-DPP: 2:
@@ -539,8 +539,8 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_uni_value_agent_scope_unsa
ret void
}
-define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, float %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, float %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
@@ -577,7 +577,7 @@ define amdgpu_ps void @global_atomic_fmax_uni_address_div_value_agent_scope_unsa
; IR-ITERATIVE-NEXT: [[TMP24:%.*]] = icmp eq i32 [[TMP8]], 0
; IR-ITERATIVE-NEXT: br i1 [[TMP24]], label [[TMP10:%.*]], label [[TMP12]]
;
-; IR-DPP-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fmax_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP33:%.*]]
; IR-DPP: 2:
@@ -774,8 +774,8 @@ define amdgpu_ps void @global_atomic_fadd_div_address_div_value_agent_scope_unsa
ret void
}
-define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
-; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_div_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
; IR-NEXT: ret void
;
@@ -783,8 +783,8 @@ define amdgpu_ps void @global_atomic_fadd_div_address_uni_value_one_as_scope_uns
ret void
}
-define amdgpu_ps void @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1 {
-; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float %val) #1 {
+; IR-LABEL: @global_atomic_fadd_div_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("one-as") monotonic, align 4
; IR-NEXT: ret void
;
@@ -828,8 +828,8 @@ define amdgpu_ps void @global_atomic_fmin_div_address_div_value_agent_scope(ptr
ret void
}
-define amdgpu_ps void @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float inreg %val) #1{
-; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float inreg %val) #1{
+; IR-LABEL: @global_atomic_fmax_div_address_uni_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
; IR-NEXT: ret void
;
@@ -837,8 +837,8 @@ define amdgpu_ps void @global_atomic_fmax_div_address_uni_value_agent_scope_unsa
ret void
}
-define amdgpu_ps void @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, float %val) #1{
-; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, float %val) #1{
+; IR-LABEL: @global_atomic_fmax_div_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], float [[VAL:%.*]] syncscope("agent") monotonic, align 4
; IR-NEXT: ret void
;
@@ -902,8 +902,8 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_scope_age
ret void
}
-define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
-; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1 {
+; IR-ITERATIVE-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-ITERATIVE: 2:
@@ -927,7 +927,7 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_sc
; IR-ITERATIVE: 17:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP17:%.*]]
; IR-DPP: 2:
@@ -955,8 +955,8 @@ define amdgpu_ps void @global_atomic_fadd_double_uni_address_uni_value_one_as_sc
ret void
}
-define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret void
;
@@ -1060,8 +1060,8 @@ define amdgpu_ps void @global_atomic_fmin_double_uni_address_div_value_agent_sco
ret void
}
-define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
-; IR-ITERATIVE-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double inreg %val) #1{
+; IR-ITERATIVE-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-ITERATIVE-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR7]]
; IR-ITERATIVE-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-ITERATIVE: 2:
@@ -1081,7 +1081,7 @@ define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_sco
; IR-ITERATIVE: 13:
; IR-ITERATIVE-NEXT: ret void
;
-; IR-DPP-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_structfp(
+; IR-DPP-LABEL: @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe_strictfp(
; IR-DPP-NEXT: [[TMP1:%.*]] = call i1 @llvm.amdgcn.ps.live() #[[ATTR8]]
; IR-DPP-NEXT: br i1 [[TMP1]], label [[TMP2:%.*]], label [[TMP13:%.*]]
; IR-DPP: 2:
@@ -1105,8 +1105,8 @@ define amdgpu_ps void @global_atomic_fmax_double_uni_address_uni_value_agent_sco
ret void
}
-define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) inreg %ptr, double %val) #1{
-; IR-LABEL: @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) inreg %ptr, double %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret void
;
@@ -1194,8 +1194,8 @@ define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_agent_sco
ret void
}
-define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double inreg %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_uni_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret void
;
@@ -1203,8 +1203,8 @@ define amdgpu_ps void @global_atomic_fadd_double_div_address_uni_value_one_as_sc
ret void
}
-define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1 {
-; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double %val) #1 {
+; IR-LABEL: @global_atomic_fadd_double_div_address_div_value_one_as_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fadd ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("one-as") monotonic, align 8
; IR-NEXT: ret void
;
@@ -1248,8 +1248,8 @@ define amdgpu_ps void @global_atomic_fmin_double_div_address_div_value_agent_sco
ret void
}
-define amdgpu_ps void @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double inreg %val) #1{
-; IR-LABEL: @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double inreg %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_div_address_uni_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret void
;
@@ -1257,8 +1257,8 @@ define amdgpu_ps void @global_atomic_fmax_double_div_address_uni_value_agent_sco
ret void
}
-define amdgpu_ps void @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr, double %val) #1{
-; IR-LABEL: @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_structfp(
+define amdgpu_ps void @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr, double %val) #1{
+; IR-LABEL: @global_atomic_fmax_double_div_address_div_value_agent_scope_unsafe_strictfp(
; IR-NEXT: [[RESULT:%.*]] = atomicrmw fmax ptr addrspace(1) [[PTR:%.*]], double [[VAL:%.*]] syncscope("agent") monotonic, align 8
; IR-NEXT: ret void
;
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
index d7773f746c6a..6555ceb3ed33 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fadd.ll
@@ -1052,8 +1052,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
ret void
}
-define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX7LESS-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1099,7 +1099,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX7LESS-NEXT: .LBB2_3:
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1141,7 +1141,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX9-NEXT: .LBB2_3:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1181,7 +1181,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1064-NEXT: .LBB2_3:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1220,7 +1220,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1032-NEXT: .LBB2_3:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -1263,7 +1263,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1164-NEXT: .LBB2_3:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1303,7 +1303,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1132-NEXT: .LBB2_3:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1345,7 +1345,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX9-DPP-NEXT: .LBB2_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1385,7 +1385,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1064-DPP-NEXT: .LBB2_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1424,7 +1424,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1032-DPP-NEXT: .LBB2_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -1467,7 +1467,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
; GFX1164-DPP-NEXT: .LBB2_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1511,8 +1511,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_uni_value_one_as_scope
}
-define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -1562,7 +1562,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1628,7 +1628,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX9-NEXT: .LBB3_5:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1694,7 +1694,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1064-NEXT: .LBB3_5:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1759,7 +1759,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1032-NEXT: .LBB3_5:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-NEXT: s_mov_b32 s14, s8
@@ -1820,7 +1820,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1164-NEXT: .LBB3_5:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-NEXT: v_mov_b32_e32 v31, v0
@@ -1880,7 +1880,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1132-NEXT: .LBB3_5:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1964,7 +1964,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX9-DPP-NEXT: .LBB3_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -2046,7 +2046,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1064-DPP-NEXT: .LBB3_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -2122,7 +2122,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1032-DPP-NEXT: .LBB3_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
@@ -2204,7 +2204,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_one_as_scope
; GFX1164-DPP-NEXT: .LBB3_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
@@ -3461,8 +3461,8 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
}
-define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -3512,7 +3512,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3578,7 +3578,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX9-NEXT: .LBB6_5:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3644,7 +3644,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1064-NEXT: .LBB6_5:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3709,7 +3709,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1032-NEXT: .LBB6_5:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-NEXT: s_mov_b32 s14, s8
@@ -3757,7 +3757,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1164-NEXT: .LBB6_4:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-NEXT: v_mov_b32_e32 v31, v0
@@ -3804,7 +3804,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1132-NEXT: .LBB6_4:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3888,7 +3888,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX9-DPP-NEXT: .LBB6_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3970,7 +3970,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1064-DPP-NEXT: .LBB6_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -4046,7 +4046,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1032-DPP-NEXT: .LBB6_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
@@ -4115,7 +4115,7 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_agent_scope_
; GFX1164-DPP-NEXT: .LBB6_2:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
@@ -5412,1589 +5412,875 @@ define amdgpu_kernel void @global_atomic_fadd_uni_address_div_value_default_scop
define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
-; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
-; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB9_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
-; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX7LESS-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[41:42]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
; GFX7LESS-NEXT: .LBB9_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
-; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_cbranch_execz .LBB9_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
-; GFX9-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB9_2
; GFX9-NEXT: .LBB9_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_mov_b32 s33, s2
; GFX1064-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB9_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB9_2
; GFX1064-NEXT: .LBB9_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s33, s2
-; GFX1032-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB9_2
; GFX1032-NEXT: .LBB9_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b32 s33, s2
; GFX1164-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], exec
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB9_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB9_2
; GFX1164-NEXT: .LBB9_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
-; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1132-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB9_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB9_2
; GFX1132-NEXT: .LBB9_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
-; GFX9-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX9-DPP-NEXT: .LBB9_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1064-DPP-NEXT: .LBB9_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1032-DPP-NEXT: .LBB9_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], exec
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1164-DPP-NEXT: .LBB9_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1132-DPP-NEXT: .LBB9_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
; GFX7LESS-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[40:41]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB10_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB10_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB10_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB10_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB10_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.float.value()
- %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 8
ret void
}
-define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
@@ -7043,7 +6329,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX7LESS-NEXT: .LBB11_3:
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7086,7 +6372,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX9-NEXT: .LBB11_3:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7127,7 +6413,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1064-NEXT: .LBB11_3:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7167,7 +6453,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1032-NEXT: .LBB11_3:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -7211,7 +6497,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1164-NEXT: .LBB11_3:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -7251,7 +6537,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1132-NEXT: .LBB11_3:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7294,7 +6580,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX9-DPP-NEXT: .LBB11_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7335,7 +6621,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1064-DPP-NEXT: .LBB11_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7375,7 +6661,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1032-DPP-NEXT: .LBB11_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -7419,7 +6705,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
; GFX1164-DPP-NEXT: .LBB11_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -7462,8 +6748,8 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_one_a
ret void
}
-define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -7516,7 +6802,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7562,7 +6848,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7608,7 +6894,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7654,7 +6940,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b32 s14, s8
; GFX1164-NEXT: s_add_u32 s8, s2, 44
@@ -7691,7 +6977,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_add_u32 s8, s2, 44
; GFX1132-NEXT: s_addc_u32 s9, s3, 0
@@ -7726,7 +7012,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7772,7 +7058,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7818,7 +7104,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7864,7 +7150,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
@@ -7901,7 +7187,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_one_a
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
@@ -8887,8 +8173,8 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
ret void
}
-define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -8941,7 +8227,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -8987,7 +8273,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9033,7 +8319,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9079,7 +8365,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b32 s14, s8
; GFX1164-NEXT: s_add_u32 s8, s2, 44
@@ -9116,7 +8402,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_add_u32 s8, s2, 44
; GFX1132-NEXT: s_addc_u32 s9, s3, 0
@@ -9151,7 +8437,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9197,7 +8483,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9243,7 +8529,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9289,7 +8575,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
@@ -9326,7 +8612,7 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
@@ -9368,1621 +8654,947 @@ define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_agent
define amdgpu_kernel void @global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
-; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
-; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB16_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
-; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX7LESS-NEXT: s_mov_b32 s1, 0x43300000
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX7LESS-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX7LESS-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[41:42]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB16_2
; GFX7LESS-NEXT: .LBB16_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
-; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_cbranch_execz .LBB16_3
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX9-NEXT: s_mov_b32 s1, 0x43300000
-; GFX9-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX9-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB16_2
; GFX9-NEXT: .LBB16_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
; GFX1064-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB16_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB16_2
; GFX1064-NEXT: .LBB16_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s33, s2
-; GFX1032-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB16_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB16_2
; GFX1032-NEXT: .LBB16_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_bcnt1_i32_b64 s0, exec
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1164-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1164-NEXT: s_cbranch_execz .LBB16_3
; GFX1164-NEXT: ; %bb.1:
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB16_2
; GFX1164-NEXT: .LBB16_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1132-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off offset:16
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1132-NEXT: s_cbranch_execz .LBB16_3
; GFX1132-NEXT: ; %bb.1:
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB16_2
; GFX1132-NEXT: .LBB16_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX9-DPP-NEXT: ; %bb.1:
; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX9-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX9-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX9-DPP-NEXT: .LBB16_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1064-DPP-NEXT: .LBB16_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1032-DPP-NEXT: .LBB16_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, exec
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1164-DPP-NEXT: ; %bb.1:
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1164-DPP-NEXT: .LBB16_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_uni_value_default_scope_strictfp:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1132-DPP-NEXT: ; %bb.1:
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[41:42]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1132-DPP-NEXT: .LBB16_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
; GFX7LESS-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
; GFX7LESS-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], v[40:41]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB17_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB17_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB17_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB17_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB17_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB17_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fadd_double_uni_address_div_value_default_scope_strictfp:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], v[40:41]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.float.value() strictfp
- %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ %result = atomicrmw fadd ptr addrspace(1) %ptr, double %divValue monotonic, align 8
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
index 98c09dfaa2d5..6548792180a0 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmax.ll
@@ -3554,1550 +3554,859 @@ define amdgpu_kernel void @global_atomic_fmax_uni_address_div_value_default_scop
define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB6_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], 4.0
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB6_2
; GFX7LESS-NEXT: .LBB6_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-NEXT: s_cbranch_execz .LBB6_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b32 s33, s2
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB6_2
; GFX9-NEXT: .LBB6_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB6_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: s_mov_b32 s33, s2
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB6_2
; GFX1064-NEXT: .LBB6_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB6_2
; GFX1032-NEXT: .LBB6_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB6_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB6_2
; GFX1164-NEXT: .LBB6_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB6_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB6_2
; GFX1132-NEXT: .LBB6_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX9-DPP-NEXT: .LBB6_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1064-DPP-NEXT: .LBB6_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1032-DPP-NEXT: .LBB6_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1164-DPP-NEXT: .LBB6_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1132-DPP-NEXT: .LBB6_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
-; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
; GFX7LESS-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB7_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB7_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB7_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB7_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB7_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB7_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.double.value()
- %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 8
ret void
}
@@ -5963,1550 +5272,859 @@ define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_one_a
define amdgpu_kernel void @global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB10_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], 4.0
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_2
; GFX7LESS-NEXT: .LBB10_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-NEXT: s_cbranch_execz .LBB10_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b32 s33, s2
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB10_2
; GFX9-NEXT: .LBB10_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB10_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: s_mov_b32 s33, s2
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB10_2
; GFX1064-NEXT: .LBB10_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB10_2
; GFX1032-NEXT: .LBB10_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB10_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB10_2
; GFX1164-NEXT: .LBB10_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB10_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB10_2
; GFX1132-NEXT: .LBB10_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX9-DPP-NEXT: .LBB10_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1064-DPP-NEXT: .LBB10_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1032-DPP-NEXT: .LBB10_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1164-DPP-NEXT: .LBB10_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_uni_value_default_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[3:4], 4.0
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1132-DPP-NEXT: .LBB10_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
-; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
; GFX7LESS-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB11_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB11_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB11_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB11_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB11_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmax_double_uni_address_div_value_default_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.double.value()
- %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ %result = atomicrmw fmax ptr addrspace(1) %ptr, double %divValue monotonic, align 8
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
index 1fb0db0e1f0d..6936cdc4d379 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fmin.ll
@@ -3554,1550 +3554,859 @@ define amdgpu_kernel void @global_atomic_fmin_uni_address_div_value_default_scop
define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB6_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_min_f64 v[0:1], v[2:3], 4.0
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB6_2
; GFX7LESS-NEXT: .LBB6_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-NEXT: s_cbranch_execz .LBB6_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b32 s33, s2
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB6_2
; GFX9-NEXT: .LBB6_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB6_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: s_mov_b32 s33, s2
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB6_2
; GFX1064-NEXT: .LBB6_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB6_2
; GFX1032-NEXT: .LBB6_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB6_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB6_2
; GFX1164-NEXT: .LBB6_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB6_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB6_2
; GFX1132-NEXT: .LBB6_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX9-DPP-NEXT: .LBB6_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1064-DPP-NEXT: .LBB6_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1032-DPP-NEXT: .LBB6_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1164-DPP-NEXT: .LBB6_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB6_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB6_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB6_2
; GFX1132-DPP-NEXT: .LBB6_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
-; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
; GFX7LESS-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB7_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB7_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB7_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB7_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB7_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB7_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB7_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB7_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.double.value()
- %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 8
ret void
}
@@ -5963,1550 +5272,859 @@ define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_one_a
define amdgpu_kernel void @global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, exec_lo, 0
; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, exec_hi, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB10_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s0
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s1
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_max_f64 v[2:3], v[0:1], v[0:1]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_min_f64 v[0:1], v[2:3], 4.0
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[4:7], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[4:5], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v5
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_2
; GFX7LESS-NEXT: .LBB10_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-NEXT: s_cbranch_execz .LBB10_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b32 s33, s2
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v4, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB10_2
; GFX9-NEXT: .LBB10_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB10_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: s_mov_b32 s33, s2
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB10_2
; GFX1064-NEXT: .LBB10_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: s_mov_b32 s33, s2
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB10_2
; GFX1032-NEXT: .LBB10_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB10_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB10_2
; GFX1164-NEXT: .LBB10_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB10_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB10_2
; GFX1132-NEXT: .LBB10_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX9-DPP-NEXT: .LBB10_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1064-DPP-NEXT: .LBB10_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s3, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1032-DPP-NEXT: .LBB10_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, exec_hi, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1164-DPP-NEXT: .LBB10_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_uni_value_default_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, exec_lo, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB10_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB10_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], 4.0
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v4, v[0:3], s[0:1] glc
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_max_f64 v[3:4], v[1:2], v[1:2]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX1132-DPP-NEXT: v_min_f64 v[3:4], v[3:4], 4.0
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_2
; GFX1132-DPP-NEXT: .LBB10_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v40, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
-; GFX7LESS-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[2:3], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
+; GFX7LESS-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
; GFX7LESS-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
; GFX7LESS-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB11_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB11_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
; GFX1064-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB11_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
; GFX1032-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB11_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB11_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB11_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.double.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.double.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX9-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX9-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX9-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1064-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1064-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1064-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v40, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v41, s[42:43]
-; GFX1032-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[2:3], v40, s[34:35]
+; GFX1032-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1032-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v0, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v3, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1164-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1164-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1164-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fmin_double_uni_address_div_value_default_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.double.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.double.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.double.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.double.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v41, s[42:43]
-; GFX1132-DPP-NEXT: v_max_f64 v[41:42], v[0:1], v[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[2:3], v40, s[34:35]
+; GFX1132-DPP-NEXT: v_max_f64 v[4:5], v[0:1], v[0:1]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB11_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_max_f64 v[0:1], v[2:3], v[2:3]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[41:42]
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[2:3], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[0:1], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 8 :: v_dual_mov_b32 v1, 0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s42 :: v_dual_mov_b32 v3, s43
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[2:3], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX1132-DPP-NEXT: v_min_f64 v[0:1], v[0:1], v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v40, v[0:3], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB11_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.double.value()
- %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ %result = atomicrmw fmin ptr addrspace(1) %ptr, double %divValue monotonic, align 8
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
index c5f7980d1e3a..5cb57703c01d 100644
--- a/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
+++ b/llvm/test/CodeGen/AMDGPU/global_atomics_scan_fsub.ll
@@ -1156,8 +1156,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
ret void
}
-define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX7LESS-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1203,7 +1203,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX7LESS-NEXT: .LBB2_3:
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1245,7 +1245,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX9-NEXT: .LBB2_3:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1285,7 +1285,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1064-NEXT: .LBB2_3:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1324,7 +1324,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1032-NEXT: .LBB2_3:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -1367,7 +1367,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1164-NEXT: .LBB2_3:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1407,7 +1407,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1132-NEXT: .LBB2_3:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1449,7 +1449,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX9-DPP-NEXT: .LBB2_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1489,7 +1489,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1064-DPP-NEXT: .LBB2_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -1528,7 +1528,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1032-DPP-NEXT: .LBB2_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -1571,7 +1571,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
; GFX1164-DPP-NEXT: .LBB2_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -1615,8 +1615,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_uni_value_one_as_scope
}
-define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -1666,7 +1666,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1732,7 +1732,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX9-NEXT: .LBB3_5:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1798,7 +1798,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1064-NEXT: .LBB3_5:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -1863,7 +1863,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1032-NEXT: .LBB3_5:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-NEXT: s_mov_b32 s14, s8
@@ -1924,7 +1924,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1164-NEXT: .LBB3_5:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-NEXT: v_mov_b32_e32 v31, v0
@@ -1984,7 +1984,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1132-NEXT: .LBB3_5:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -2068,7 +2068,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX9-DPP-NEXT: .LBB3_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -2150,7 +2150,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1064-DPP-NEXT: .LBB3_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -2226,7 +2226,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1032-DPP-NEXT: .LBB3_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
@@ -2308,7 +2308,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_one_as_scope
; GFX1164-DPP-NEXT: .LBB3_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
@@ -3617,8 +3617,8 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
}
-define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -3668,7 +3668,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3734,7 +3734,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX9-NEXT: .LBB6_5:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3800,7 +3800,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1064-NEXT: .LBB6_5:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -3865,7 +3865,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1032-NEXT: .LBB6_5:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-NEXT: s_mov_b32 s14, s8
@@ -3926,7 +3926,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1164-NEXT: .LBB6_5:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-NEXT: v_mov_b32_e32 v31, v0
@@ -3986,7 +3986,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1132-NEXT: .LBB6_5:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -4070,7 +4070,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX9-DPP-NEXT: .LBB6_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -4152,7 +4152,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1064-DPP-NEXT: .LBB6_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -4228,7 +4228,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1032-DPP-NEXT: .LBB6_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
@@ -4310,7 +4310,7 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_agent_scope_
; GFX1164-DPP-NEXT: .LBB6_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
@@ -5620,1589 +5620,875 @@ define amdgpu_kernel void @global_atomic_fsub_uni_address_div_value_default_scop
define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
-; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
-; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB9_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
-; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX7LESS-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
+; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s7
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[41:42]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB9_2
; GFX7LESS-NEXT: .LBB9_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
-; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_cbranch_execz .LBB9_3
; GFX9-NEXT: ; %bb.1:
-; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
-; GFX9-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB9_2
; GFX9-NEXT: .LBB9_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_mov_b32 s33, s2
; GFX1064-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB9_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB9_2
; GFX1064-NEXT: .LBB9_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s33, s2
-; GFX1032-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_mov_b32 s2, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB9_2
; GFX1032-NEXT: .LBB9_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b32 s33, s2
; GFX1164-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_mov_b64 s[4:5], exec
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-NEXT: s_cbranch_execz .LBB9_3
; GFX1164-NEXT: ; %bb.1:
-; GFX1164-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB9_2
; GFX1164-NEXT: .LBB9_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132-NEXT: v_mov_b32_e32 v40, v0
-; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1132-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-NEXT: s_cbranch_execz .LBB9_3
; GFX1132-NEXT: ; %bb.1:
-; GFX1132-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB9_2
; GFX1132-NEXT: .LBB9_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX9-DPP-NEXT: ; %bb.1:
-; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
-; GFX9-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX9-DPP-NEXT: .LBB9_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1064-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1064-DPP-NEXT: .LBB9_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
+; GFX1032-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1032-DPP-NEXT: .LBB9_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], exec
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1164-DPP-NEXT: ; %bb.1:
-; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1164-DPP-NEXT: .LBB9_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_agent_scope_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s4, exec_lo
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v0
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB9_3
; GFX1132-DPP-NEXT: ; %bb.1:
-; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s0
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s3, s3
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_cvt_f64_u32_e32 v[0:1], s3
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], v[0:1], 4.0
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], v[0:1], 4.0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB9_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB9_2
; GFX1132-DPP-NEXT: .LBB9_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 4
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("agent") monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe(ptr addrspace(1) %ptr) #0 {
; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
; GFX7LESS-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[40:41]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB10_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB10_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB10_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB10_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB10_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB10_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_align4_unsafe:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB10_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB10_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.float.value()
- %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 4
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue syncscope("agent") monotonic, align 8
ret void
}
-define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
@@ -7251,7 +6537,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX7LESS-NEXT: .LBB11_3:
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7294,7 +6580,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX9-NEXT: .LBB11_3:
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7335,7 +6621,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1064-NEXT: .LBB11_3:
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7375,7 +6661,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1032-NEXT: .LBB11_3:
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -7419,7 +6705,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1164-NEXT: .LBB11_3:
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -7459,7 +6745,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1132-NEXT: .LBB11_3:
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7502,7 +6788,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX9-DPP-NEXT: .LBB11_3:
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7543,7 +6829,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1064-DPP-NEXT: .LBB11_3:
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
@@ -7583,7 +6869,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1032-DPP-NEXT: .LBB11_3:
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
@@ -7627,7 +6913,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
; GFX1164-DPP-NEXT: .LBB11_3:
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -7669,8 +6955,8 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_one_a
%result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 syncscope("one-as") monotonic
ret void
}
-define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -7723,7 +7009,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7769,7 +7055,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7815,7 +7101,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7861,7 +7147,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b32 s14, s8
; GFX1164-NEXT: s_add_u32 s8, s2, 44
@@ -7898,7 +7184,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_add_u32 s8, s2, 44
; GFX1132-NEXT: s_addc_u32 s9, s3, 0
@@ -7933,7 +7219,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -7979,7 +7265,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -8025,7 +7311,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -8071,7 +7357,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
@@ -8108,7 +7394,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_one_a
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_one_as_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
@@ -9094,8 +8380,8 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
ret void
}
-define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp(ptr addrspace(1) %ptr) #1 {
-; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp(ptr addrspace(1) %ptr) #1 {
+; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX7LESS: ; %bb.0:
; GFX7LESS-NEXT: s_mov_b32 s32, 0
; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
@@ -9148,7 +8434,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
-; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9194,7 +8480,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
-; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064: ; %bb.0:
; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9240,7 +8526,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
-; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032: ; %bb.0:
; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9286,7 +8572,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
-; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164: ; %bb.0:
; GFX1164-NEXT: s_mov_b32 s14, s8
; GFX1164-NEXT: s_add_u32 s8, s2, 44
@@ -9323,7 +8609,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-NEXT: s_endpgm
;
-; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132: ; %bb.0:
; GFX1132-NEXT: s_add_u32 s8, s2, 44
; GFX1132-NEXT: s_addc_u32 s9, s3, 0
@@ -9358,7 +8644,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1132-NEXT: s_endpgm
;
-; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX9-DPP: ; %bb.0:
; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9404,7 +8690,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
-; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1064-DPP: ; %bb.0:
; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9450,7 +8736,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
-; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1032-DPP: ; %bb.0:
; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
@@ -9496,7 +8782,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
-; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1164-DPP: ; %bb.0:
; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
@@ -9533,7 +8819,7 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1164-DPP-NEXT: s_endpgm
;
-; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_structfp:
+; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_agent_scope_unsafe_strictfp:
; GFX1132-DPP: ; %bb.0:
; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
@@ -9574,1621 +8860,947 @@ define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_agent
define amdgpu_kernel void @global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s42, -1
-; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s40, s40, s3
-; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s2
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], exec
-; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s0, 0
-; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s1, v0
+; GFX7LESS-NEXT: s_mov_b32 s12, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s13, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s14, -1
+; GFX7LESS-NEXT: s_mov_b32 s15, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s12, s12, s3
+; GFX7LESS-NEXT: s_addc_u32 s13, s13, 0
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], exec
+; GFX7LESS-NEXT: v_mbcnt_lo_u32_b32_e64 v0, s2, 0
+; GFX7LESS-NEXT: v_mbcnt_hi_u32_b32_e32 v0, s3, v0
; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX7LESS-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX7LESS-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX7LESS-NEXT: s_cbranch_execz .LBB16_3
; GFX7LESS-NEXT: ; %bb.1:
-; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x9
-; GFX7LESS-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
-; GFX7LESS-NEXT: s_mov_b32 s1, 0x43300000
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_load_dwordx2 s[2:3], s[36:37], 0x0
+; GFX7LESS-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
+; GFX7LESS-NEXT: s_bcnt1_i32_b64 s6, s[2:3]
+; GFX7LESS-NEXT: s_mov_b32 s7, 0x43300000
; GFX7LESS-NEXT: v_mov_b32_e32 v0, 0
; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX7LESS-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX7LESS-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], 0
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, s2
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, s3
+; GFX7LESS-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], 0
+; GFX7LESS-NEXT: s_mov_b32 s3, 0xf000
+; GFX7LESS-NEXT: v_add_f64 v[0:1], s[6:7], v[0:1]
+; GFX7LESS-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
+; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, s8
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, s9
+; GFX7LESS-NEXT: s_mov_b32 s2, -1
; GFX7LESS-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[41:42]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s34, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s35, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b32 s12, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v40
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
+; GFX7LESS-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s36
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s37
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[40:43], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[40:43], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v2
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v1
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v0
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[0:3], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[2:3]
+; GFX7LESS-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
+; GFX7LESS-NEXT: v_mov_b32_e32 v2, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v3, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB16_2
; GFX7LESS-NEXT: .LBB16_3:
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s42, -1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-NEXT: s_add_u32 s40, s40, s3
-; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_mov_b32 s33, s2
+; GFX9-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s10, -1
+; GFX9-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-NEXT: s_add_u32 s8, s8, s3
+; GFX9-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-NEXT: s_addc_u32 s9, s9, 0
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-NEXT: s_cbranch_execz .LBB16_3
; GFX9-NEXT: ; %bb.1:
; GFX9-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX9-NEXT: s_mov_b32 s1, 0x43300000
-; GFX9-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX9-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: v_mov_b32_e32 v6, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX9-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_mov_b32_e32 v3, s5
; GFX9-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX9-NEXT: s_add_u32 s8, s34, 44
-; GFX9-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-NEXT: s_mov_b32 s12, s33
-; GFX9-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-NEXT: s_cbranch_execnz .LBB16_2
; GFX9-NEXT: .LBB16_3:
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s42, -1
-; GFX1064-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-NEXT: s_mov_b32 s33, s2
+; GFX1064-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s10, -1
+; GFX1064-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s8, s8, s3
; GFX1064-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-NEXT: s_addc_u32 s9, s9, 0
; GFX1064-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
; GFX1064-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-NEXT: s_cbranch_execz .LBB16_3
; GFX1064-NEXT: ; %bb.1:
-; GFX1064-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1064-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
-; GFX1064-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v6, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-NEXT: s_mov_b32 s12, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-NEXT: s_cbranch_execnz .LBB16_2
; GFX1064-NEXT: .LBB16_3:
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s33, s2
-; GFX1032-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s42, -1
-; GFX1032-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s10, -1
+; GFX1032-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-NEXT: s_mov_b32 s2, 0
; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-NEXT: s_mov_b32 s38, 0
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-NEXT: s_cbranch_execz .LBB16_3
; GFX1032-NEXT: ; %bb.1:
-; GFX1032-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1032-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v6, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-NEXT: s_mov_b32 s12, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-NEXT: s_cbranch_execnz .LBB16_2
; GFX1032-NEXT: .LBB16_3:
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-NEXT: s_bcnt1_i32_b64 s0, exec
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-NEXT: v_mov_b32_e32 v0, 0x43300000
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-NEXT: v_mov_b32_e32 v1, s2
; GFX1164-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1164-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1164-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1164-NEXT: s_cbranch_execz .LBB16_3
; GFX1164-NEXT: ; %bb.1:
; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1164-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-NEXT: s_mov_b32 s33, s2
-; GFX1164-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-NEXT: s_waitcnt vmcnt(0)
; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b32 s12, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-NEXT: s_cbranch_execnz .LBB16_2
; GFX1164-NEXT: .LBB16_3:
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
; GFX1132-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-NEXT: s_mov_b32 s2, 0
+; GFX1132-NEXT: s_mov_b32 s3, exec_lo
; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1132-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off offset:16
-; GFX1132-NEXT: s_mov_b32 s38, 0
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1132-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1132-NEXT: s_cbranch_execz .LBB16_3
; GFX1132-NEXT: ; %bb.1:
; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1132-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-NEXT: s_mov_b32 s33, s15
+; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-NEXT: s_waitcnt vmcnt(0)
; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b32 s12, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-NEXT: s_cbranch_execnz .LBB16_2
; GFX1132-NEXT: .LBB16_3:
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s42, -1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], exec
-; GFX9-DPP-NEXT: s_mov_b32 s43, 0xe00000
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s0, 0
-; GFX9-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s1, v0
-; GFX9-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-DPP-NEXT: s_mov_b32 s33, s2
+; GFX9-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s10, -1
+; GFX9-DPP-NEXT: s_mov_b32 s11, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], exec
+; GFX9-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
+; GFX9-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
+; GFX9-DPP-NEXT: s_addc_u32 s9, s9, 0
; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: s_and_saveexec_b64 s[2:3], vcc
+; GFX9-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX9-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX9-DPP-NEXT: ; %bb.1:
; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 0
-; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s0, s[0:1]
+; GFX9-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0xc3300000
-; GFX9-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[0:1], v[0:1]
-; GFX9-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX9-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], s[2:3], v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX9-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX9-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX9-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX9-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX9-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: s_mov_b32 s12, s33
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX9-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX9-DPP-NEXT: .LBB16_3:
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s43, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s2
+; GFX1064-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s11, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s8, s8, s3
; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], exec
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s9, 0
; GFX1064-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1064-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
; GFX1064-DPP-NEXT: v_mbcnt_hi_u32_b32 v0, s3, v0
; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 0, v0
-; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[0:1], vcc
+; GFX1064-DPP-NEXT: s_and_saveexec_b64 s[4:5], vcc
; GFX1064-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1064-DPP-NEXT: ; %bb.1:
-; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s0, s[2:3]
-; GFX1064-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1064-DPP-NEXT: s_bcnt1_i32_b64 s2, s[2:3]
+; GFX1064-DPP-NEXT: s_mov_b32 s3, 0x43300000
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1064-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x0
+; GFX1064-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1064-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1064-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[38:39]
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1064-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[2:3]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1064-DPP-NEXT: .LBB16_3:
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s2, exec_lo
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s2, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s42, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s43, 0x31c16000
+; GFX1032-DPP-NEXT: s_mov_b32 s8, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s9, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s10, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s11, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s8, s8, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s3, exec_lo
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s9, 0
+; GFX1032-DPP-NEXT: v_mbcnt_lo_u32_b32 v0, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s2, 0
; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v0
-; GFX1032-DPP-NEXT: s_add_u32 s40, s40, s3
-; GFX1032-DPP-NEXT: s_addc_u32 s41, s41, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1032-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: s_and_saveexec_b32 s0, vcc_lo
+; GFX1032-DPP-NEXT: s_and_saveexec_b32 s4, vcc_lo
; GFX1032-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1032-DPP-NEXT: ; %bb.1:
-; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s0, s2
-; GFX1032-DPP-NEXT: s_mov_b32 s1, 0x43300000
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[36:37], s[34:35], 0x24
-; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[0:1]
+; GFX1032-DPP-NEXT: s_bcnt1_i32_b32 s4, s3
+; GFX1032-DPP-NEXT: s_mov_b32 s5, 0x43300000
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x24
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, s[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[0:1], s[36:37], 0x0
-; GFX1032-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
+; GFX1032-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s4
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s5
; GFX1032-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
+; GFX1032-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[0:1], v6, v[0:3], s[0:1] glc
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[40:41]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[42:43]
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[40:43], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[40:43], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[4:5]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[40:43], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[40:43], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s38
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1032-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s2
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1032-DPP-NEXT: .LBB16_3:
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s0, exec
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
+; GFX1164-DPP-NEXT: s_bcnt1_i32_b64 s2, exec
; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s2
; GFX1164-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], exec
; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1164-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1164-DPP-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1164-DPP-NEXT: v_mbcnt_hi_u32_b32 v2, exec_hi, v2
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], exec
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX1164-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1164-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1164-DPP-NEXT: ; %bb.1:
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1164-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s2
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], 0
+; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1164-DPP-NEXT: s_load_b64 s[2:3], s[0:1], 0x0
+; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1164-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s1
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, s0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s2
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s3
+; GFX1164-DPP-NEXT: s_mov_b64 s[2:3], 0
; GFX1164-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v40
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s36
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s37
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[38:39], vcc, s[38:39]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[38:39]
+; GFX1164-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[0:1], v[2:3]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, v1
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, v0
+; GFX1164-DPP-NEXT: s_or_b64 s[2:3], vcc, s[2:3]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[2:3]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1164-DPP-NEXT: .LBB16_3:
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_uni_value_default_scope_strictfp:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[0:1]
-; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: s_bcnt1_i32_b32 s2, exec_lo
; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v0, 0x43300000
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v0, 0x43300000 :: v_dual_mov_b32 v1, s2
; GFX1132-DPP-NEXT: v_mbcnt_lo_u32_b32 v2, exec_lo, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s2, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s3, exec_lo
; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:20
-; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off offset:16
-; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off offset:16
-; GFX1132-DPP-NEXT: s_mov_b32 s38, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s0, exec_lo
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v0, off offset:4
+; GFX1132-DPP-NEXT: scratch_store_b32 off, v1, off
+; GFX1132-DPP-NEXT: scratch_load_b64 v[0:1], off, off
; GFX1132-DPP-NEXT: v_cmpx_eq_u32_e32 0, v2
; GFX1132-DPP-NEXT: s_cbranch_execz .LBB16_3
; GFX1132-DPP-NEXT: ; %bb.1:
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: v_add_f64 v[0:1], 0xc3300000, v[0:1]
-; GFX1132-DPP-NEXT: s_load_b64 s[36:37], s[34:35], 0x24
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
+; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x24
+; GFX1132-DPP-NEXT: v_mov_b32_e32 v6, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[36:37], 0x0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_mul_f64 v[41:42], 4.0, v[0:1]
+; GFX1132-DPP-NEXT: s_load_b64 s[4:5], s[0:1], 0x0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX1132-DPP-NEXT: v_mul_f64 v[4:5], 4.0, v[0:1]
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v1, s0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
; GFX1132-DPP-NEXT: .LBB16_2: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
-; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[41:42]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v40 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s36
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s37 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s38, vcc_lo, s38
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s38
+; GFX1132-DPP-NEXT: v_add_f64 v[0:1], v[2:3], -v[4:5]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[0:1], v6, v[0:3], s[0:1] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[0:1], v[2:3]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, v1 :: v_dual_mov_b32 v2, v0
+; GFX1132-DPP-NEXT: s_or_b32 s2, vcc_lo, s2
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s2
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB16_2
; GFX1132-DPP-NEXT: .LBB16_3:
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
- %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 monotonic, align 4
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double 4.0 monotonic, align 8
ret void
}
define amdgpu_kernel void @global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp(ptr addrspace(1) %ptr) #2 {
; GFX7LESS-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX7LESS: ; %bb.0:
-; GFX7LESS-NEXT: s_movk_i32 s32, 0x800
-; GFX7LESS-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX7LESS-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX7LESS-NEXT: s_mov_b32 s50, -1
-; GFX7LESS-NEXT: s_mov_b32 s51, 0xe8f000
-; GFX7LESS-NEXT: s_add_u32 s48, s48, s9
-; GFX7LESS-NEXT: s_addc_u32 s49, s49, 0
-; GFX7LESS-NEXT: s_mov_b32 s33, s8
-; GFX7LESS-NEXT: s_mov_b32 s40, s7
-; GFX7LESS-NEXT: s_mov_b32 s41, s6
-; GFX7LESS-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX7LESS-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX7LESS-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX7LESS-NEXT: s_load_dwordx2 s[44:45], s[2:3], 0x9
-; GFX7LESS-NEXT: s_mov_b32 s47, 0xf000
-; GFX7LESS-NEXT: s_mov_b32 s46, -1
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
+; GFX7LESS-NEXT: s_mov_b32 s32, 0
+; GFX7LESS-NEXT: s_mov_b32 s40, SCRATCH_RSRC_DWORD0
+; GFX7LESS-NEXT: s_mov_b32 s41, SCRATCH_RSRC_DWORD1
+; GFX7LESS-NEXT: s_mov_b32 s42, -1
+; GFX7LESS-NEXT: s_mov_b32 s43, 0xe8f000
+; GFX7LESS-NEXT: s_add_u32 s40, s40, s9
+; GFX7LESS-NEXT: s_addc_u32 s41, s41, 0
+; GFX7LESS-NEXT: s_mov_b32 s14, s8
+; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX7LESS-NEXT: s_load_dwordx2 s[36:37], s[2:3], 0x9
+; GFX7LESS-NEXT: s_mov_b32 s39, 0xf000
+; GFX7LESS-NEXT: s_mov_b32 s38, -1
+; GFX7LESS-NEXT: s_add_u32 s8, s2, 44
+; GFX7LESS-NEXT: s_addc_u32 s9, s3, 0
+; GFX7LESS-NEXT: s_getpc_b64 s[2:3]
+; GFX7LESS-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX7LESS-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX7LESS-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX7LESS-NEXT: v_lshlrev_b32_e32 v1, 10, v1
; GFX7LESS-NEXT: v_or_b32_e32 v0, v0, v1
-; GFX7LESS-NEXT: v_or_b32_e32 v42, v0, v2
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_or_b32_e32 v31, v0, v2
+; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX7LESS-NEXT: s_mov_b32 s12, s6
+; GFX7LESS-NEXT: s_mov_b32 s13, s7
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[40:41]
+; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[42:43]
; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_mov_b32_e32 v40, v0
-; GFX7LESS-NEXT: v_mov_b32_e32 v41, v1
-; GFX7LESS-NEXT: buffer_load_dwordx2 v[0:1], off, s[44:47], 0
-; GFX7LESS-NEXT: s_mov_b64 s[42:43], 0
+; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX7LESS-NEXT: buffer_load_dwordx2 v[4:5], off, s[36:39], 0
+; GFX7LESS-NEXT: s_mov_b64 s[0:1], 0
; GFX7LESS-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX7LESS-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
-; GFX7LESS-NEXT: v_add_f64 v[2:3], v[0:1], -v[40:41]
-; GFX7LESS-NEXT: buffer_store_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: buffer_store_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: s_add_u32 s8, s36, 44
-; GFX7LESS-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:12
-; GFX7LESS-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:8
-; GFX7LESS-NEXT: s_addc_u32 s9, s37, 0
-; GFX7LESS-NEXT: s_getpc_b64 s[0:1]
-; GFX7LESS-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX7LESS-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX7LESS-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX7LESS-NEXT: s_waitcnt expcnt(2)
-; GFX7LESS-NEXT: v_mov_b32_e32 v0, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v1, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v4, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v5, 8
-; GFX7LESS-NEXT: v_mov_b32_e32 v6, 0
-; GFX7LESS-NEXT: v_mov_b32_e32 v7, 0
-; GFX7LESS-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX7LESS-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX7LESS-NEXT: s_mov_b32 s12, s41
-; GFX7LESS-NEXT: s_mov_b32 s13, s40
-; GFX7LESS-NEXT: s_mov_b32 s14, s33
-; GFX7LESS-NEXT: v_mov_b32_e32 v31, v42
-; GFX7LESS-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX7LESS-NEXT: s_mov_b64 s[2:3], s[50:51]
+; GFX7LESS-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
; GFX7LESS-NEXT: s_waitcnt expcnt(0)
-; GFX7LESS-NEXT: v_mov_b32_e32 v2, s44
-; GFX7LESS-NEXT: v_mov_b32_e32 v3, s45
-; GFX7LESS-NEXT: s_waitcnt lgkmcnt(0)
-; GFX7LESS-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX7LESS-NEXT: v_and_b32_e32 v2, 1, v0
-; GFX7LESS-NEXT: buffer_load_dword v0, off, s[48:51], 0
-; GFX7LESS-NEXT: buffer_load_dword v1, off, s[48:51], 0 offset:4
-; GFX7LESS-NEXT: v_cmp_eq_u32_e32 vcc, 1, v2
-; GFX7LESS-NEXT: s_or_b64 s[42:43], vcc, s[42:43]
-; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[42:43]
+; GFX7LESS-NEXT: v_mov_b32_e32 v9, v5
+; GFX7LESS-NEXT: v_mov_b32_e32 v8, v4
+; GFX7LESS-NEXT: v_mov_b32_e32 v7, v3
+; GFX7LESS-NEXT: v_mov_b32_e32 v6, v2
+; GFX7LESS-NEXT: buffer_atomic_cmpswap_x2 v[6:9], off, s[36:39], 0 glc
+; GFX7LESS-NEXT: s_waitcnt vmcnt(0)
+; GFX7LESS-NEXT: v_cmp_eq_u64_e32 vcc, v[6:7], v[4:5]
+; GFX7LESS-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX7LESS-NEXT: v_mov_b32_e32 v4, v6
+; GFX7LESS-NEXT: v_mov_b32_e32 v5, v7
+; GFX7LESS-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX7LESS-NEXT: s_cbranch_execnz .LBB17_1
; GFX7LESS-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX7LESS-NEXT: s_endpgm
;
; GFX9-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX9: ; %bb.0:
-; GFX9-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-NEXT: s_mov_b32 s50, -1
-; GFX9-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-NEXT: s_add_u32 s48, s48, s9
-; GFX9-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-NEXT: s_mov_b32 s33, s8
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-NEXT: s_mov_b32 s40, s7
-; GFX9-NEXT: s_mov_b32 s41, s6
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-NEXT: s_mov_b32 s38, -1
+; GFX9-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-NEXT: s_add_u32 s36, s36, s9
+; GFX9-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-NEXT: s_mov_b32 s14, s8
+; GFX9-NEXT: s_add_u32 s8, s2, 44
+; GFX9-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-NEXT: s_getpc_b64 s[2:3]
+; GFX9-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: s_movk_i32 s32, 0x800
-; GFX9-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-NEXT: s_mov_b32 s12, s6
+; GFX9-NEXT: s_mov_b32 s13, s7
+; GFX9-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-NEXT: s_mov_b32 s32, 0
+; GFX9-NEXT: v_mov_b32_e32 v40, 0
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-NEXT: s_mov_b64 s[0:1], 0
; GFX9-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX9-NEXT: s_add_u32 s8, s36, 44
-; GFX9-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-NEXT: s_getpc_b64 s[0:1]
-; GFX9-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-NEXT: s_mov_b32 s12, s41
-; GFX9-NEXT: s_mov_b32 s13, s40
-; GFX9-NEXT: s_mov_b32 s14, s33
-; GFX9-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-NEXT: s_cbranch_execnz .LBB17_1
; GFX9-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-NEXT: s_endpgm
;
; GFX1064-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1064: ; %bb.0:
-; GFX1064-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-NEXT: s_mov_b32 s50, -1
-; GFX1064-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-NEXT: s_mov_b32 s33, s8
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-NEXT: s_mov_b32 s40, s7
-; GFX1064-NEXT: s_mov_b32 s41, s6
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-NEXT: s_mov_b32 s38, -1
+; GFX1064-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-NEXT: s_mov_b32 s14, s8
+; GFX1064-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-NEXT: s_mov_b32 s12, s6
+; GFX1064-NEXT: s_mov_b32 s13, s7
+; GFX1064-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-NEXT: s_mov_b32 s32, 0
+; GFX1064-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1064-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1064-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-NEXT: s_mov_b32 s12, s41
-; GFX1064-NEXT: s_mov_b32 s13, s40
-; GFX1064-NEXT: s_mov_b32 s14, s33
-; GFX1064-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-NEXT: s_clause 0x1
-; GFX1064-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_cbranch_execnz .LBB17_1
; GFX1064-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-NEXT: s_endpgm
;
; GFX1032-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1032: ; %bb.0:
-; GFX1032-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-NEXT: s_mov_b32 s50, -1
-; GFX1032-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-NEXT: s_mov_b32 s33, s8
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-NEXT: s_mov_b32 s40, s7
-; GFX1032-NEXT: s_mov_b32 s41, s6
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-NEXT: s_mov_b32 s38, -1
+; GFX1032-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-NEXT: s_mov_b32 s14, s8
+; GFX1032-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-NEXT: s_mov_b32 s12, s6
+; GFX1032-NEXT: s_mov_b32 s13, s7
+; GFX1032-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-NEXT: s_mov_b32 s32, 0
+; GFX1032-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-NEXT: s_mov_b32 s44, 0
+; GFX1032-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-NEXT: s_mov_b32 s0, 0
; GFX1032-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1032-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1032-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-NEXT: s_mov_b32 s12, s41
-; GFX1032-NEXT: s_mov_b32 s13, s40
-; GFX1032-NEXT: s_mov_b32 s14, s33
-; GFX1032-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-NEXT: s_clause 0x1
-; GFX1032-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_cbranch_execnz .LBB17_1
; GFX1032-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-NEXT: s_endpgm
;
; GFX1164-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1164: ; %bb.0:
-; GFX1164-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-NEXT: s_mov_b32 s33, s8
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-NEXT: s_mov_b32 s14, s8
+; GFX1164-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-NEXT: s_mov_b32 s12, s6
; GFX1164-NEXT: s_mov_b32 s13, s7
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_mov_b32 s32, 32
-; GFX1164-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-NEXT: s_mov_b32 s40, s7
-; GFX1164-NEXT: s_mov_b32 s41, s6
-; GFX1164-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-NEXT: s_mov_b32 s32, 0
+; GFX1164-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-NEXT: .p2align 6
+; GFX1164-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1164-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1164-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-NEXT: s_mov_b32 s12, s41
-; GFX1164-NEXT: s_mov_b32 s13, s40
-; GFX1164-NEXT: s_mov_b32 s14, s33
-; GFX1164-NEXT: s_clause 0x1
-; GFX1164-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-NEXT: s_cbranch_execnz .LBB17_1
; GFX1164-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-NEXT: s_endpgm
;
; GFX1132-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1132: ; %bb.0:
-; GFX1132-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-NEXT: s_mov_b32 s40, s14
-; GFX1132-NEXT: s_mov_b32 s41, s13
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-NEXT: s_mov_b32 s12, s13
+; GFX1132-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-NEXT: s_mov_b32 s13, s14
; GFX1132-NEXT: s_mov_b32 s14, s15
-; GFX1132-NEXT: s_mov_b32 s32, 32
-; GFX1132-NEXT: s_mov_b32 s33, s15
-; GFX1132-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-NEXT: s_mov_b32 s32, 0
; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-NEXT: s_mov_b32 s44, 0
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-NEXT: .p2align 6
+; GFX1132-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-NEXT: s_mov_b32 s0, 0
; GFX1132-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1132-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1132-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-NEXT: s_mov_b32 s12, s41
-; GFX1132-NEXT: s_mov_b32 s13, s40
-; GFX1132-NEXT: s_mov_b32 s14, s33
-; GFX1132-NEXT: s_clause 0x1
-; GFX1132-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-NEXT: s_cbranch_execnz .LBB17_1
; GFX1132-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-NEXT: s_endpgm
;
; GFX9-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX9-DPP: ; %bb.0:
-; GFX9-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX9-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX9-DPP-NEXT: s_mov_b32 s50, -1
-; GFX9-DPP-NEXT: s_mov_b32 s51, 0xe00000
-; GFX9-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX9-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[36:37], s[2:3]
-; GFX9-DPP-NEXT: s_mov_b32 s33, s8
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_mov_b32 s40, s7
-; GFX9-DPP-NEXT: s_mov_b32 s41, s6
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX9-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX9-DPP-NEXT: s_mov_b32 s38, -1
+; GFX9-DPP-NEXT: s_mov_b32 s39, 0xe00000
+; GFX9-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX9-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX9-DPP-NEXT: s_mov_b32 s14, s8
+; GFX9-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX9-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX9-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
+; GFX9-DPP-NEXT: s_getpc_b64 s[2:3]
+; GFX9-DPP-NEXT: s_add_u32 s2, s2, div.float.value@gotpcrel32@lo+4
+; GFX9-DPP-NEXT: s_addc_u32 s3, s3, div.float.value@gotpcrel32@hi+12
+; GFX9-DPP-NEXT: s_load_dwordx2 s[16:17], s[2:3], 0x0
+; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX9-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX9-DPP-NEXT: s_mov_b64 s[34:35], s[4:5]
-; GFX9-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX9-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX9-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX9-DPP-NEXT: s_mov_b32 s12, s6
+; GFX9-DPP-NEXT: s_mov_b32 s13, s7
+; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX9-DPP-NEXT: s_mov_b32 s32, 0
+; GFX9-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX9-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX9-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX9-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX9-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX9-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX9-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX9-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX9-DPP-NEXT: s_add_u32 s8, s36, 44
-; GFX9-DPP-NEXT: s_addc_u32 s9, s37, 0
-; GFX9-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX9-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX9-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX9-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX9-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX9-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX9-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX9-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX9-DPP-NEXT: s_mov_b64 s[10:11], s[34:35]
-; GFX9-DPP-NEXT: s_mov_b32 s12, s41
-; GFX9-DPP-NEXT: s_mov_b32 s13, s40
-; GFX9-DPP-NEXT: s_mov_b32 s14, s33
-; GFX9-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX9-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX9-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX9-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX9-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX9-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX9-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX9-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX9-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX9-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX9-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX9-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX9-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX9-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX9-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX9-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX9-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX9-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX9-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX9-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX9-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX9-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX9-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX9-DPP-NEXT: s_endpgm
;
; GFX1064-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1064-DPP: ; %bb.0:
-; GFX1064-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1064-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1064-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1064-DPP-NEXT: s_mov_b32 s51, 0x31e16000
-; GFX1064-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1064-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1064-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1064-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1064-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1064-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1064-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1064-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1064-DPP-NEXT: s_mov_b32 s39, 0x31e16000
+; GFX1064-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1064-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1064-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1064-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1064-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1064-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1064-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1064-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1064-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1064-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1064-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: s_movk_i32 s32, 0x800
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1064-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1064-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1064-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1064-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1064-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1064-DPP-NEXT: s_mov_b64 s[44:45], 0
+; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1064-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1064-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1064-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1064-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1064-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1064-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1064-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1064-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1064-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1064-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1064-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1064-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1064-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1064-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1064-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1064-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1064-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1064-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1064-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1064-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1064-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1064-DPP-NEXT: s_clause 0x1
-; GFX1064-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1064-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1064-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1064-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1064-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[44:45]
+; GFX1064-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1064-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1064-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1064-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1064-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1064-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1064-DPP-NEXT: s_andn2_b64 exec, exec, s[0:1]
; GFX1064-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1064-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1064-DPP-NEXT: s_endpgm
;
; GFX1032-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1032-DPP: ; %bb.0:
-; GFX1032-DPP-NEXT: s_mov_b32 s48, SCRATCH_RSRC_DWORD0
-; GFX1032-DPP-NEXT: s_mov_b32 s49, SCRATCH_RSRC_DWORD1
-; GFX1032-DPP-NEXT: s_mov_b32 s50, -1
-; GFX1032-DPP-NEXT: s_mov_b32 s51, 0x31c16000
-; GFX1032-DPP-NEXT: s_add_u32 s48, s48, s9
-; GFX1032-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1032-DPP-NEXT: s_addc_u32 s49, s49, 0
-; GFX1032-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1032-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[42:43], s[2:3], 0x24
+; GFX1032-DPP-NEXT: s_mov_b32 s36, SCRATCH_RSRC_DWORD0
+; GFX1032-DPP-NEXT: s_mov_b32 s37, SCRATCH_RSRC_DWORD1
+; GFX1032-DPP-NEXT: s_mov_b32 s38, -1
+; GFX1032-DPP-NEXT: s_mov_b32 s39, 0x31c16000
+; GFX1032-DPP-NEXT: s_add_u32 s36, s36, s9
+; GFX1032-DPP-NEXT: s_addc_u32 s37, s37, 0
+; GFX1032-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1032-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1032-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1032-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1032-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1032-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[16:17], s[4:5], 0x0
+; GFX1032-DPP-NEXT: s_load_dwordx2 s[34:35], s[2:3], 0x24
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v2, 20, v2
; GFX1032-DPP-NEXT: v_lshlrev_b32_e32 v1, 10, v1
-; GFX1032-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: v_or3_b32 v42, v0, v1, v2
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: s_movk_i32 s32, 0x400
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
+; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[36:37]
+; GFX1032-DPP-NEXT: s_mov_b32 s12, s6
+; GFX1032-DPP-NEXT: s_mov_b32 s13, s7
+; GFX1032-DPP-NEXT: v_or3_b32 v31, v0, v1, v2
+; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[38:39]
+; GFX1032-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1032-DPP-NEXT: global_load_dwordx2 v[1:2], v43, s[42:43]
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1032-DPP-NEXT: s_mov_b32 s44, 0
+; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1032-DPP-NEXT: global_load_dwordx2 v[4:5], v40, s[34:35]
+; GFX1032-DPP-NEXT: s_mov_b32 s0, 0
; GFX1032-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1032-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1032-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1032-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1032-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1032-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1032-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1032-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1032-DPP-NEXT: buffer_store_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: buffer_store_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: s_load_dwordx2 s[6:7], s[0:1], 0x0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1032-DPP-NEXT: s_mov_b64 s[0:1], s[48:49]
-; GFX1032-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1032-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1032-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1032-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1032-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1032-DPP-NEXT: s_mov_b64 s[2:3], s[50:51]
-; GFX1032-DPP-NEXT: buffer_store_dword v4, off, s[48:51], 0 offset:12
-; GFX1032-DPP-NEXT: buffer_store_dword v3, off, s[48:51], 0 offset:8
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1032-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
-; GFX1032-DPP-NEXT: s_clause 0x1
-; GFX1032-DPP-NEXT: buffer_load_dword v1, off, s[48:51], 0
-; GFX1032-DPP-NEXT: buffer_load_dword v2, off, s[48:51], 0 offset:4
-; GFX1032-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1032-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1032-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s44
+; GFX1032-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1032-DPP-NEXT: global_atomic_cmpswap_x2 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1032-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1032-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1032-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1032-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1032-DPP-NEXT: s_andn2_b32 exec_lo, exec_lo, s0
; GFX1032-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1032-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
; GFX1032-DPP-NEXT: s_endpgm
;
; GFX1164-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1164-DPP: ; %bb.0:
-; GFX1164-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1164-DPP-NEXT: s_mov_b32 s33, s8
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
+; GFX1164-DPP-NEXT: s_mov_b32 s14, s8
+; GFX1164-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1164-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1164-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1164-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1164-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1164-DPP-NEXT: s_load_b64 s[16:17], s[4:5], 0x0
+; GFX1164-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1164-DPP-NEXT: s_mov_b32 s12, s6
; GFX1164-DPP-NEXT: s_mov_b32 s13, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v42, v0
-; GFX1164-DPP-NEXT: s_mov_b32 s40, s7
-; GFX1164-DPP-NEXT: s_mov_b32 s41, s6
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v43, 0
+; GFX1164-DPP-NEXT: s_mov_b32 s32, 0
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, 0
; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v41, v1
-; GFX1164-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v40, v0
-; GFX1164-DPP-NEXT: s_mov_b64 s[44:45], 0
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1164-DPP-NEXT: .p2align 6
+; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[16:17]
+; GFX1164-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1164-DPP-NEXT: s_mov_b64 s[0:1], 0
; GFX1164-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1164-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX1164-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1164-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1164-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1164-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1164-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1164-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v31, v42
-; GFX1164-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v0, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, 8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v6, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1164-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1164-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1164-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1164-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1164-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1164-DPP-NEXT: s_clause 0x1
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1164-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v1, 0
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v2, s42
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v3, s43
-; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, 0
-; GFX1164-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1164-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1164-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1164-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1164-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1164-DPP-NEXT: v_cmp_eq_u32_e32 vcc, 1, v0
-; GFX1164-DPP-NEXT: s_or_b64 s[44:45], vcc, s[44:45]
-; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[44:45]
+; GFX1164-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1164-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1164-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1164-DPP-NEXT: v_cmp_eq_u64_e32 vcc, v[2:3], v[4:5]
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v5, v3
+; GFX1164-DPP-NEXT: v_mov_b32_e32 v4, v2
+; GFX1164-DPP-NEXT: s_or_b64 s[0:1], vcc, s[0:1]
+; GFX1164-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1164-DPP-NEXT: s_and_not1_b64 exec, exec, s[0:1]
; GFX1164-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1164-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1164-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1164-DPP-NEXT: s_endpgm
;
; GFX1132-DPP-LABEL: global_atomic_fsub_double_uni_address_div_value_default_scope_strictfp:
; GFX1132-DPP: ; %bb.0:
-; GFX1132-DPP-NEXT: s_mov_b64 s[34:35], s[2:3]
-; GFX1132-DPP-NEXT: s_mov_b64 s[38:39], s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, div.float.value@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, div.float.value@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v31, v0
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: s_load_b64 s[42:43], s[2:3], 0x24
-; GFX1132-DPP-NEXT: s_mov_b64 s[36:37], s[4:5]
-; GFX1132-DPP-NEXT: s_mov_b32 s40, s14
-; GFX1132-DPP-NEXT: s_mov_b32 s41, s13
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
+; GFX1132-DPP-NEXT: s_add_u32 s8, s2, 44
+; GFX1132-DPP-NEXT: s_addc_u32 s9, s3, 0
+; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[4:5]
+; GFX1132-DPP-NEXT: s_getpc_b64 s[4:5]
+; GFX1132-DPP-NEXT: s_add_u32 s4, s4, div.float.value@gotpcrel32@lo+4
+; GFX1132-DPP-NEXT: s_addc_u32 s5, s5, div.float.value@gotpcrel32@hi+12
+; GFX1132-DPP-NEXT: s_load_b64 s[6:7], s[4:5], 0x0
+; GFX1132-DPP-NEXT: s_load_b64 s[34:35], s[2:3], 0x24
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, 0 :: v_dual_mov_b32 v31, v0
; GFX1132-DPP-NEXT: s_mov_b32 s12, s13
+; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[0:1]
; GFX1132-DPP-NEXT: s_mov_b32 s13, s14
; GFX1132-DPP-NEXT: s_mov_b32 s14, s15
-; GFX1132-DPP-NEXT: s_mov_b32 s32, 32
-; GFX1132-DPP-NEXT: s_mov_b32 s33, s15
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v42, v0 :: v_dual_mov_b32 v43, 0
+; GFX1132-DPP-NEXT: s_mov_b32 s32, 0
; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v40, v0 :: v_dual_mov_b32 v41, v1
-; GFX1132-DPP-NEXT: global_load_b64 v[1:2], v43, s[42:43]
-; GFX1132-DPP-NEXT: s_mov_b32 s44, 0
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x1
-; GFX1132-DPP-NEXT: .p2align 6
+; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[6:7]
+; GFX1132-DPP-NEXT: global_load_b64 v[4:5], v40, s[34:35]
+; GFX1132-DPP-NEXT: s_mov_b32 s0, 0
; GFX1132-DPP-NEXT: .LBB17_1: ; %atomicrmw.start
; GFX1132-DPP-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
-; GFX1132-DPP-NEXT: v_add_f64 v[3:4], v[1:2], -v[40:41]
-; GFX1132-DPP-NEXT: s_add_u32 s8, s34, 44
-; GFX1132-DPP-NEXT: s_addc_u32 s9, s35, 0
-; GFX1132-DPP-NEXT: s_getpc_b64 s[0:1]
-; GFX1132-DPP-NEXT: s_add_u32 s0, s0, __atomic_compare_exchange@gotpcrel32@lo+4
-; GFX1132-DPP-NEXT: s_addc_u32 s1, s1, __atomic_compare_exchange@gotpcrel32@hi+12
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v31, v42 :: v_dual_mov_b32 v0, 8
-; GFX1132-DPP-NEXT: s_load_b64 s[0:1], s[0:1], 0x0
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, 8 :: v_dual_mov_b32 v6, 0
-; GFX1132-DPP-NEXT: v_mov_b32_e32 v7, 0
-; GFX1132-DPP-NEXT: s_mov_b64 s[4:5], s[38:39]
-; GFX1132-DPP-NEXT: s_mov_b64 s[10:11], s[36:37]
-; GFX1132-DPP-NEXT: s_mov_b32 s12, s41
-; GFX1132-DPP-NEXT: s_mov_b32 s13, s40
-; GFX1132-DPP-NEXT: s_mov_b32 s14, s33
-; GFX1132-DPP-NEXT: s_clause 0x1
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[1:2], off
-; GFX1132-DPP-NEXT: scratch_store_b64 off, v[3:4], off offset:8
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s42
-; GFX1132-DPP-NEXT: v_dual_mov_b32 v3, s43 :: v_dual_mov_b32 v4, 0
-; GFX1132-DPP-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1132-DPP-NEXT: s_swappc_b64 s[30:31], s[0:1]
-; GFX1132-DPP-NEXT: scratch_load_b64 v[1:2], off, off
-; GFX1132-DPP-NEXT: v_and_b32_e32 v0, 1, v0
-; GFX1132-DPP-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX1132-DPP-NEXT: v_cmp_eq_u32_e32 vcc_lo, 1, v0
-; GFX1132-DPP-NEXT: s_or_b32 s44, vcc_lo, s44
-; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s44
+; GFX1132-DPP-NEXT: v_add_f64 v[2:3], v[4:5], -v[0:1]
+; GFX1132-DPP-NEXT: global_atomic_cmpswap_b64 v[2:3], v40, v[2:5], s[34:35] glc
+; GFX1132-DPP-NEXT: s_waitcnt vmcnt(0)
+; GFX1132-DPP-NEXT: v_cmp_eq_u64_e32 vcc_lo, v[2:3], v[4:5]
+; GFX1132-DPP-NEXT: v_dual_mov_b32 v5, v3 :: v_dual_mov_b32 v4, v2
+; GFX1132-DPP-NEXT: s_or_b32 s0, vcc_lo, s0
+; GFX1132-DPP-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX1132-DPP-NEXT: s_and_not1_b32 exec_lo, exec_lo, s0
; GFX1132-DPP-NEXT: s_cbranch_execnz .LBB17_1
; GFX1132-DPP-NEXT: ; %bb.2: ; %atomicrmw.end
-; GFX1132-DPP-NEXT: s_set_inst_prefetch_distance 0x2
; GFX1132-DPP-NEXT: s_endpgm
%divValue = call double @div.float.value() strictfp
- %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue monotonic, align 4
+ %result = atomicrmw fsub ptr addrspace(1) %ptr, double %divValue monotonic, align 8
ret void
}
diff --git a/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor-constexpr-alias.ll b/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor-constexpr-alias.ll
index a883db1fa61f..95fc47469b51 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor-constexpr-alias.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor-constexpr-alias.ll
@@ -26,14 +26,14 @@ define void @bar() addrspace(1) {
}
;.
-; CHECK: @[[LLVM_GLOBAL_CTORS:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @foo.alias, ptr null }, { i32, ptr, ptr } { i32 1, ptr inttoptr (i64 4096 to ptr), ptr null }]
-; CHECK: @[[LLVM_GLOBAL_DTORS:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr addrspacecast (ptr addrspace(1) @bar to ptr), ptr null }]
-; CHECK: @[[__INIT_ARRAY_START:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__INIT_ARRAY_END:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__FINI_ARRAY_START:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__FINI_ARRAY_END:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[LLVM_USED:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [2 x ptr] [ptr @amdgcn.device.init, ptr @amdgcn.device.fini], section "llvm.metadata"
-; CHECK: @[[FOO_ALIAS:[a-zA-Z0-9_$"\\.-]+]] = hidden alias void (), ptr @foo
+; CHECK: @llvm.global_ctors = appending addrspace(1) global [2 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @foo.alias, ptr null }, { i32, ptr, ptr } { i32 1, ptr inttoptr (i64 4096 to ptr), ptr null }]
+; CHECK: @llvm.global_dtors = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr addrspacecast (ptr addrspace(1) @bar to ptr), ptr null }]
+; CHECK: @__init_array_start = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__init_array_end = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__fini_array_start = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__fini_array_end = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @llvm.used = appending addrspace(1) global [2 x ptr] [ptr @amdgcn.device.init, ptr @amdgcn.device.fini], section "llvm.metadata"
+; CHECK: @foo.alias = hidden alias void (), ptr @foo
;.
; CHECK-LABEL: define void @foo(
; CHECK-SAME: ) #[[ATTR0:[0-9]+]] {
diff --git a/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor.ll b/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor.ll
index 58e1589d0483..c4f0821caacd 100644
--- a/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-ctor-dtor.ll
@@ -44,13 +44,13 @@ define internal void @bar() {
}
;.
-; CHECK: @[[LLVM_GLOBAL_CTORS:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @foo, ptr null }]
-; CHECK: @[[LLVM_GLOBAL_DTORS:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @bar, ptr null }]
-; CHECK: @[[__INIT_ARRAY_START:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__INIT_ARRAY_END:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__FINI_ARRAY_START:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[__FINI_ARRAY_END:[a-zA-Z0-9_$"\\.-]+]] = external addrspace(1) constant [0 x ptr addrspace(1)]
-; CHECK: @[[LLVM_USED:[a-zA-Z0-9_$"\\.-]+]] = appending addrspace(1) global [2 x ptr] [ptr @amdgcn.device.init, ptr @amdgcn.device.fini], section "llvm.metadata"
+; CHECK: @llvm.global_ctors = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @foo, ptr null }]
+; CHECK: @llvm.global_dtors = appending addrspace(1) global [1 x { i32, ptr, ptr }] [{ i32, ptr, ptr } { i32 1, ptr @bar, ptr null }]
+; CHECK: @__init_array_start = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__init_array_end = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__fini_array_start = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @__fini_array_end = external addrspace(1) constant [0 x ptr addrspace(1)]
+; CHECK: @llvm.used = appending addrspace(1) global [2 x ptr] [ptr @amdgcn.device.init, ptr @amdgcn.device.fini], section "llvm.metadata"
;.
; CHECK-LABEL: define internal void @foo() {
; CHECK-NEXT: ret void
diff --git a/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll b/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll
new file mode 100644
index 000000000000..c7a831185b83
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/match-perm-extract-vector-elt-bug.ll
@@ -0,0 +1,109 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 %s -o - | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 %s -o - | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -verify-machineinstrs -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1100 %s -o - | FileCheck -check-prefix=GFX11 %s
+
+define amdgpu_kernel void @test(ptr addrspace(1) %src, ptr addrspace(1) %dst) {
+; GFX9-LABEL: test:
+; GFX9: ; %bb.0: ; %entry
+; GFX9-NEXT: s_load_dword s7, s[4:5], 0x1c
+; GFX9-NEXT: s_load_dword s8, s[4:5], 0x38
+; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX9-NEXT: s_waitcnt lgkmcnt(0)
+; GFX9-NEXT: s_and_b32 s4, s7, 0xffff
+; GFX9-NEXT: s_mul_i32 s6, s6, s4
+; GFX9-NEXT: s_add_i32 s8, s8, s6
+; GFX9-NEXT: v_add_u32_e32 v0, s8, v0
+; GFX9-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GFX9-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
+; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, s0, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v5, vcc
+; GFX9-NEXT: global_load_dwordx4 v[0:3], v[0:1], off
+; GFX9-NEXT: v_mov_b32_e32 v6, s3
+; GFX9-NEXT: v_add_co_u32_e32 v4, vcc, s2, v4
+; GFX9-NEXT: v_addc_co_u32_e32 v5, vcc, v6, v5, vcc
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_not_b32_e32 v3, v3
+; GFX9-NEXT: v_not_b32_e32 v2, v2
+; GFX9-NEXT: v_not_b32_e32 v1, v1
+; GFX9-NEXT: v_not_b32_e32 v0, v0
+; GFX9-NEXT: global_store_dwordx4 v[4:5], v[0:3], off
+; GFX9-NEXT: s_endpgm
+;
+; GFX10-LABEL: test:
+; GFX10: ; %bb.0: ; %entry
+; GFX10-NEXT: s_clause 0x2
+; GFX10-NEXT: s_load_dword s7, s[4:5], 0x1c
+; GFX10-NEXT: s_load_dword s8, s[4:5], 0x38
+; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
+; GFX10-NEXT: s_waitcnt lgkmcnt(0)
+; GFX10-NEXT: s_and_b32 s4, s7, 0xffff
+; GFX10-NEXT: s_mul_i32 s6, s6, s4
+; GFX10-NEXT: v_add3_u32 v0, s8, s6, v0
+; GFX10-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GFX10-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX10-NEXT: v_add_co_u32 v0, vcc_lo, s0, v4
+; GFX10-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, s1, v5, vcc_lo
+; GFX10-NEXT: v_add_co_u32 v4, vcc_lo, s2, v4
+; GFX10-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
+; GFX10-NEXT: global_load_dwordx4 v[0:3], v[0:1], off
+; GFX10-NEXT: s_waitcnt vmcnt(0)
+; GFX10-NEXT: v_not_b32_e32 v3, v3
+; GFX10-NEXT: v_not_b32_e32 v2, v2
+; GFX10-NEXT: v_not_b32_e32 v1, v1
+; GFX10-NEXT: v_not_b32_e32 v0, v0
+; GFX10-NEXT: global_store_dwordx4 v[4:5], v[0:3], off
+; GFX10-NEXT: s_endpgm
+;
+; GFX11-LABEL: test:
+; GFX11: ; %bb.0: ; %entry
+; GFX11-NEXT: s_clause 0x2
+; GFX11-NEXT: s_load_b32 s4, s[0:1], 0x1c
+; GFX11-NEXT: s_load_b32 s5, s[0:1], 0x38
+; GFX11-NEXT: s_load_b128 s[0:3], s[0:1], 0x0
+; GFX11-NEXT: s_waitcnt lgkmcnt(0)
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_mul_i32 s15, s15, s4
+; GFX11-NEXT: v_add3_u32 v0, s5, s15, v0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_ashrrev_i32_e32 v1, 31, v0
+; GFX11-NEXT: v_lshlrev_b64 v[4:5], 4, v[0:1]
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, s0, v4
+; GFX11-NEXT: v_add_co_ci_u32_e32 v1, vcc_lo, s1, v5, vcc_lo
+; GFX11-NEXT: v_add_co_u32 v4, vcc_lo, s2, v4
+; GFX11-NEXT: v_add_co_ci_u32_e32 v5, vcc_lo, s3, v5, vcc_lo
+; GFX11-NEXT: global_load_b128 v[0:3], v[0:1], off
+; GFX11-NEXT: s_waitcnt vmcnt(0)
+; GFX11-NEXT: v_not_b32_e32 v3, v3
+; GFX11-NEXT: v_not_b32_e32 v2, v2
+; GFX11-NEXT: v_not_b32_e32 v1, v1
+; GFX11-NEXT: v_not_b32_e32 v0, v0
+; GFX11-NEXT: global_store_b128 v[4:5], v[0:3], off
+; GFX11-NEXT: s_nop 0
+; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
+; GFX11-NEXT: s_endpgm
+entry:
+ %implicitarg.ptr = tail call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
+ %arg.1.ptr = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 40
+ %arg.1 = load i64, ptr addrspace(4) %arg.1.ptr, align 8
+ %workgroup.id.x = tail call i32 @llvm.amdgcn.workgroup.id.x()
+ %arg.2.ptr = getelementptr inbounds i8, ptr addrspace(4) %implicitarg.ptr, i64 12
+ %arg.2 = load i16, ptr addrspace(4) %arg.2.ptr, align 4
+ %arg.2.ext = zext i16 %arg.2 to i32
+ %mul = mul i32 %workgroup.id.x, %arg.2.ext
+ %workitem.id.x = tail call i32 @llvm.amdgcn.workitem.id.x()
+ %add = add i32 %mul, %workitem.id.x
+ %add.ext = zext i32 %add to i64
+ %add.1 = add i64 %arg.1, %add.ext
+ %sext = shl i64 %add.1, 32
+ %idxprom = ashr exact i64 %sext, 32
+ %arrayidx = getelementptr inbounds <16 x i8>, ptr addrspace(1) %src, i64 %idxprom
+ %arrayval = load <16 x i8>, ptr addrspace(1) %arrayidx, align 16
+ %not = xor <16 x i8> %arrayval, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
+ %arrayidx2 = getelementptr inbounds <16 x i8>, ptr addrspace(1) %dst, i64 %idxprom
+ store <16 x i8> %not, ptr addrspace(1) %arrayidx2, align 16
+ ret void
+}
diff --git a/llvm/test/CodeGen/AMDGPU/permute_i8.ll b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
index 8ac332197215..7ca9ae359a49 100644
--- a/llvm/test/CodeGen/AMDGPU/permute_i8.ll
+++ b/llvm/test/CodeGen/AMDGPU/permute_i8.ll
@@ -3816,13 +3816,15 @@ define hidden void @extract_v13i64(ptr addrspace(1) %in0, ptr addrspace(1) %in1,
; GFX10-LABEL: extract_v13i64:
; GFX10: ; %bb.0:
; GFX10-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX10-NEXT: s_clause 0x1
-; GFX10-NEXT: global_load_dwordx4 v[8:11], v[0:1], off
-; GFX10-NEXT: global_load_dwordx4 v[12:15], v[0:1], off offset:16
+; GFX10-NEXT: s_clause 0x2
+; GFX10-NEXT: global_load_dwordx4 v[8:11], v[0:1], off offset:48
+; GFX10-NEXT: global_load_dwordx4 v[11:14], v[0:1], off
+; GFX10-NEXT: global_load_dwordx4 v[14:17], v[0:1], off offset:64
+; GFX10-NEXT: ; kill: killed $vgpr0 killed $vgpr1
; GFX10-NEXT: s_waitcnt vmcnt(1)
-; GFX10-NEXT: v_perm_b32 v0, v9, v8, 0x3020504
+; GFX10-NEXT: v_perm_b32 v0, v12, v13, 0x1000504
; GFX10-NEXT: s_waitcnt vmcnt(0)
-; GFX10-NEXT: v_perm_b32 v1, v11, v12, 0x1000706
+; GFX10-NEXT: v_perm_b32 v1, v10, v14, 0x1000504
; GFX10-NEXT: global_store_dword v[4:5], v0, off
; GFX10-NEXT: global_store_dword v[6:7], v1, off
; GFX10-NEXT: s_setpc_b64 s[30:31]
@@ -3830,14 +3832,15 @@ define hidden void @extract_v13i64(ptr addrspace(1) %in0, ptr addrspace(1) %in1,
; GFX9-LABEL: extract_v13i64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: global_load_dwordx4 v[8:11], v[0:1], off
-; GFX9-NEXT: global_load_dwordx4 v[12:15], v[0:1], off offset:16
-; GFX9-NEXT: s_mov_b32 s4, 0x3020504
-; GFX9-NEXT: s_mov_b32 s5, 0x1000706
+; GFX9-NEXT: global_load_dwordx4 v[8:11], v[0:1], off offset:48
+; GFX9-NEXT: global_load_dwordx4 v[11:14], v[0:1], off
+; GFX9-NEXT: global_load_dwordx4 v[14:17], v[0:1], off offset:64
+; GFX9-NEXT: s_mov_b32 s4, 0x1000504
+; GFX9-NEXT: ; kill: killed $vgpr0 killed $vgpr1
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_perm_b32 v0, v9, v8, s4
+; GFX9-NEXT: v_perm_b32 v0, v12, v13, s4
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_perm_b32 v1, v11, v12, s5
+; GFX9-NEXT: v_perm_b32 v1, v10, v14, s4
; GFX9-NEXT: global_store_dword v[4:5], v0, off
; GFX9-NEXT: global_store_dword v[6:7], v1, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/simplify-libcalls.ll b/llvm/test/CodeGen/AMDGPU/simplify-libcalls.ll
index 54ca33401ccf..5a241f85b2e2 100644
--- a/llvm/test/CodeGen/AMDGPU/simplify-libcalls.ll
+++ b/llvm/test/CodeGen/AMDGPU/simplify-libcalls.ll
@@ -475,8 +475,7 @@ entry:
declare float @_Z5rootnfi(float, i32)
; GCN-LABEL: {{^}}define amdgpu_kernel void @test_rootn_2
-; GCN-POSTLINK: call fast float @_Z5rootnfi(float %tmp, i32 2)
-; GCN-PRELINK: %__rootn2sqrt = tail call fast float @llvm.sqrt.f32(float %tmp)
+; GCN: call fast float @llvm.sqrt.f32(float %tmp)
define amdgpu_kernel void @test_rootn_2(ptr addrspace(1) nocapture %a) {
entry:
%tmp = load float, ptr addrspace(1) %a, align 4
@@ -507,8 +506,8 @@ entry:
}
; GCN-LABEL: {{^}}define amdgpu_kernel void @test_rootn_m2
-; GCN-POSTLINK: call fast float @_Z5rootnfi(float %tmp, i32 -2)
-; GCN-PRELINK: %__rootn2rsqrt = tail call fast float @_Z5rsqrtf(float %tmp)
+; GCN: [[SQRT:%.+]] = tail call fast float @llvm.sqrt.f32(float %tmp)
+; GCN-NEXT: fdiv fast float 1.000000e+00, [[SQRT]]
define amdgpu_kernel void @test_rootn_m2(ptr addrspace(1) nocapture %a) {
entry:
%tmp = load float, ptr addrspace(1) %a, align 4
diff --git a/llvm/test/CodeGen/ARM/exp10-libcall-names.ll b/llvm/test/CodeGen/ARM/exp10-libcall-names.ll
new file mode 100644
index 000000000000..0ac68b3e8c46
--- /dev/null
+++ b/llvm/test/CodeGen/ARM/exp10-libcall-names.ll
@@ -0,0 +1,39 @@
+; RUN: llc -mtriple=armv7-linux-gnu < %s | FileCheck -check-prefix=LINUX %s
+; RUN: llc -mtriple=armv7-apple-macos10.9 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=armv7-apple-ios7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=armv7-apple-tvos7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=armv7-apple-watchos7.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=armv7-apple-xros7.0 < %s | FileCheck -check-prefix=APPLE %s
+
+; RUN: not llc -mtriple=armv7-apple-macos10.8 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=armv7-apple-ios6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=armv7-apple-tvos6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=armv7-apple-xros6.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+
+; Check exp10/exp10f are emitted as __exp10/__exp10f on assorted systems.
+
+; ERR: no libcall available for fexp10
+
+define float @test_exp10_f32(float %x) {
+; LINUX-LABEL: test_exp10_f32:
+; LINUX: @ %bb.0:
+; LINUX-NEXT: b exp10f
+;
+; APPLE-LABEL: test_exp10_f32:
+; APPLE: @ %bb.0:
+; APPLE-NEXT: b ___exp10f
+ %ret = call float @llvm.exp10.f32(float %x)
+ ret float %ret
+}
+
+define double @test_exp10_f64(double %x) {
+; LINUX-LABEL: test_exp10_f64:
+; LINUX: @ %bb.0:
+; LINUX-NEXT: b exp10
+;
+; APPLE-LABEL: test_exp10_f64:
+; APPLE: @ %bb.0:
+; APPLE-NEXT: b ___exp10
+ %ret = call double @llvm.exp10.f64(double %x)
+ ret double %ret
+}
diff --git a/llvm/test/CodeGen/BPF/xadd.ll b/llvm/test/CodeGen/BPF/xadd.ll
index 4901d9380ac4..5aeeb9baf7b8 100644
--- a/llvm/test/CodeGen/BPF/xadd.ll
+++ b/llvm/test/CodeGen/BPF/xadd.ll
@@ -22,7 +22,7 @@ entry:
call void @llvm.dbg.value(metadata ptr %ptr, metadata !13, metadata !DIExpression()), !dbg !15
%0 = atomicrmw add ptr %ptr, i32 4 seq_cst, !dbg !16
%1 = atomicrmw add ptr %ptr, i32 6 seq_cst, !dbg !17
-; CHECK: line 4: Invalid usage of the XADD return value
+; CHECK: in function test i32 (ptr): Invalid usage of the XADD return value
call void @llvm.dbg.value(metadata i32 %1, metadata !14, metadata !DIExpression()), !dbg !18
ret i32 %1, !dbg !19
}
diff --git a/llvm/test/CodeGen/NVPTX/param-overalign.ll b/llvm/test/CodeGen/NVPTX/param-overalign.ll
index 63e706982f39..5c09bb8e1a5d 100644
--- a/llvm/test/CodeGen/NVPTX/param-overalign.ll
+++ b/llvm/test/CodeGen/NVPTX/param-overalign.ll
@@ -1,5 +1,5 @@
; RUN: llc < %s -march=nvptx | FileCheck %s
-; RUN: %if ptxas %{ llc < %s -march=nvptx -verify-machineinstrs | %ptxas-verify %}
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -verify-machineinstrs | %ptxas-verify %}
target triple = "nvptx64-nvidia-cuda"
diff --git a/llvm/test/CodeGen/NVPTX/st-param-imm.ll b/llvm/test/CodeGen/NVPTX/st-param-imm.ll
index d9e005719238..29f27c1ba6cd 100644
--- a/llvm/test/CodeGen/NVPTX/st-param-imm.ll
+++ b/llvm/test/CodeGen/NVPTX/st-param-imm.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc < %s -march=nvptx64 | FileCheck %s
; RUN: llc < %s -march=nvptx | FileCheck %s
-; RUN: %if ptxas %{ llc < %s -march=nvptx -verify-machineinstrs | %ptxas-verify %}
+; RUN: %if ptxas && !ptxas-12.0 %{ llc < %s -march=nvptx -verify-machineinstrs | %ptxas-verify %}
; RUN: %if ptxas %{ llc < %s -march=nvptx64 -verify-machineinstrs | %ptxas-verify %}
target triple = "nvptx64-nvidia-cuda"
diff --git a/llvm/test/CodeGen/PowerPC/toc-data.ll b/llvm/test/CodeGen/PowerPC/toc-data.ll
index 7f7afe76cfcd..1a1078095452 100644
--- a/llvm/test/CodeGen/PowerPC/toc-data.ll
+++ b/llvm/test/CodeGen/PowerPC/toc-data.ll
@@ -16,6 +16,10 @@
; RUN: -stop-before=ppc-vsx-copy | FileCheck %s --check-prefix CHECK32LARGE
; RUN: llc -mtriple powerpc-ibm-aix-xcoff -code-model=large -verify-machineinstrs < %s | FileCheck %s --check-prefix TEST32LARGE
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=large -verify-machineinstrs < %s \
+; RUN: -stop-before=ppc-vsx-copy | FileCheck %s --check-prefix CHECK64LARGE
+; RUN: llc -mtriple powerpc64-ibm-aix-xcoff -code-model=large -verify-machineinstrs < %s | FileCheck %s --check-prefix TEST64LARGE
+
; Global variables i and f have the toc-data attribute.
; In the following functions, those writing to or reading from
; variables i and f should use the toc-data access pattern.
@@ -63,6 +67,17 @@ define dso_local void @write_int(i32 signext %in) {
; TEST32LARGE-NEXT: la 4, i[TD]@l(4)
; TEST32LARGE-NEXT: stw 3, 0(4)
+
+; CHECK64LARGE: name: write_int
+; CHECK64LARGE: %[[SCRATCH1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @i
+; CHECK64LARGE-NEXT: %[[SCRATCH2:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDItocL8 killed %[[SCRATCH1]], @i
+; CHECK64LARGE-NEXT: STW8 %{{[0-9]+}}, 0, killed %[[SCRATCH2]] :: (store (s32) into @i)
+
+; TEST64LARGE: .write_int:
+; TEST64LARGE: addis 4, i[TD]@u(2)
+; TEST64LARGE-NEXT: la 4, i[TD]@l(4)
+; TEST64LARGE-NEXT: stw 3, 0(4)
+
define dso_local i64 @read_ll() {
entry:
%0 = load i64, ptr @ll, align 8
@@ -98,6 +113,15 @@ define dso_local i64 @read_ll() {
; TEST32LARGE-NEXT: lwz 3, 0(4)
; TEST32LARGE-NEXT: lwz 4, 4(4)
+; CHECK64LARGE: name: read_ll
+; CHECK64LARGE: %[[SCRATCH1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @ll
+; CHECK64LARGE: LDtocL @ll, killed %[[SCRATCH1]] :: (load (s64) from got)
+
+; TEST64LARGE: .read_ll:
+; TEST64LARGE: addis 3, L..C0@u(2)
+; TEST64LARGE-NEXT: ld 3, L..C0@l(3)
+; TEST64LARGE-NEXT: ld 3, 0(3)
+
define dso_local float @read_float() {
entry:
%0 = load float, ptr @f, align 4
@@ -134,6 +158,18 @@ define dso_local float @read_float() {
; TEST32LARGE-NEXT: la 3, f[TD]@l(3)
; TEST32LARGE-NEXT: lfs 1, 0(3)
+
+; CHECK64LARGE: name: read_float
+; CHECK64LARGE: %[[SCRATCH1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @f
+; CHECK64LARGE-NEXT: %[[SCRATCH2:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDItocL8 killed %[[SCRATCH1]], @f
+; CHECK64LARGE-NEXT: LFS 0, killed %[[SCRATCH2]] :: (dereferenceable load (s32) from @f)
+
+
+; TEST64LARGE: .read_float:
+; TEST64LARGE: addis 3, f[TD]@u(2)
+; TEST64LARGE-NEXT: la 3, f[TD]@l(3)
+; TEST64LARGE-NEXT: lfs 1, 0(3)
+
define dso_local void @write_double(double %in) {
entry:
store double %in, ptr @d, align 8
@@ -167,6 +203,15 @@ define dso_local void @write_double(double %in) {
; TEST32LARGE-NEXT: lwz 3, L..C1@l(3)
; TEST32LARGE-NEXT: stfd 1, 0(3)
+; CHECK64LARGE: name: write_double
+; CHECK64LARGE: %[[SCRATCH1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @d
+; CHECK64LARGE: LDtocL @d, killed %[[SCRATCH1]] :: (load (s64) from got)
+
+; TEST64LARGE: .write_double:
+; TEST64LARGE: addis 3, L..C1@u(2)
+; TEST64LARGE-NEXT: ld 3, L..C1@l(3)
+; TEST64LARGE-NEXT: stfd 1, 0(3)
+
define dso_local nonnull ptr @addr() {
entry:
ret ptr @i
@@ -237,4 +282,26 @@ define dso_local nonnull ptr @addr() {
; TEST32LARGE-NEXT: .globl f[TD]
; TEST32LARGE-NOT: .tc f[TE],f[RW]
+; CHECK64LARGE: name: addr
+; CHECK64LARGE: %[[SCRATCH1:[0-9]+]]:g8rc_and_g8rc_nox0 = ADDIStocHA8 $x2, @i
+; CHECK64LARGE-NEXT: %[[SCRATCH2:[0-9]+]]:g8rc = ADDItocL8 killed %[[SCRATCH1]], @i
+; CHECK64LARGE-NEXT: $x3 = COPY %[[SCRATCH2]]
+
+; TEST64LARGE: .addr:
+; TEST64LARGE: addis 3, i[TD]@u(2)
+; TEST64LARGE: la 3, i[TD]@l(3)
+
+; TEST64LARGE: .toc
+; TEST64LARGE: .tc ll[TE],ll[RW]
+; TEST64LARGE-NOT: .csect ll[TD]
+; TEST64LARGE: .tc d[TE],d[RW]
+; TEST64LARGE-NOT: .csect d[TD],2
+; TEST64LARGE: .csect i[TD],2
+; TEST64LARGE-NEXT: .globl i[TD]
+; TEST64LARGE-NEXT: .align 2
+; TEST64LARGE-NOT: .tc i[TE],i[RW]
+; TEST64LARGE: .csect f[TD],2
+; TEST64LARGE-NEXT: .globl f[TD]
+; TEST64LARGE-NOT: .tc f[TE],f[RW]
+
attributes #0 = { "toc-data" }
diff --git a/llvm/test/CodeGen/PowerPC/vec_shuffle.ll b/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
index e698ab1e15a9..22b5ff0d21cb 100644
--- a/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
+++ b/llvm/test/CodeGen/PowerPC/vec_shuffle.ll
@@ -32,7 +32,7 @@ entry:
%tmp15 = extractelement <16 x i8> %tmp2.upgrd.2, i32 2 ; <i8> [#uses=1]
%tmp16 = extractelement <16 x i8> %tmp2.upgrd.2, i32 3 ; <i8> [#uses=1]
%tmp17 = extractelement <16 x i8> %tmp2.upgrd.2, i32 4 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.3, i32 0 ; <<16 x i8>> [#uses=1]
+ %tmp18 = insertelement <16 x i8> poison, i8 %tmp.upgrd.3, i32 0 ; <<16 x i8>> [#uses=1]
%tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
%tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
%tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
@@ -80,7 +80,7 @@ define void @VSLDOI_xx(ptr %A, ptr %B) {
%tmp15 = extractelement <16 x i8> %tmp2.upgrd.6, i32 2 ; <i8> [#uses=1]
%tmp16 = extractelement <16 x i8> %tmp2.upgrd.6, i32 3 ; <i8> [#uses=1]
%tmp17 = extractelement <16 x i8> %tmp2.upgrd.6, i32 4 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.7, i32 0 ; <<16 x i8>> [#uses=1]
+ %tmp18 = insertelement <16 x i8> poison, i8 %tmp.upgrd.7, i32 0 ; <<16 x i8>> [#uses=1]
%tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
%tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
%tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
@@ -150,7 +150,7 @@ entry:
%tmp15 = extractelement <16 x i8> %tmp2, i32 14 ; <i8> [#uses=1]
%tmp16 = extractelement <16 x i8> %tmp, i32 15 ; <i8> [#uses=1]
%tmp17 = extractelement <16 x i8> %tmp2, i32 15 ; <i8> [#uses=1]
- %tmp18 = insertelement <16 x i8> undef, i8 %tmp.upgrd.12, i32 0 ; <<16 x i8>> [#uses=1]
+ %tmp18 = insertelement <16 x i8> poison, i8 %tmp.upgrd.12, i32 0 ; <<16 x i8>> [#uses=1]
%tmp19 = insertelement <16 x i8> %tmp18, i8 %tmp3, i32 1 ; <<16 x i8>> [#uses=1]
%tmp20 = insertelement <16 x i8> %tmp19, i8 %tmp4, i32 2 ; <<16 x i8>> [#uses=1]
%tmp21 = insertelement <16 x i8> %tmp20, i8 %tmp5, i32 3 ; <<16 x i8>> [#uses=1]
@@ -189,7 +189,7 @@ entry:
%tmp7 = extractelement <8 x i16> %tmp2, i32 6 ; <i16> [#uses=1]
%tmp8 = extractelement <8 x i16> %tmp, i32 7 ; <i16> [#uses=1]
%tmp9 = extractelement <8 x i16> %tmp2, i32 7 ; <i16> [#uses=1]
- %tmp10 = insertelement <8 x i16> undef, i16 %tmp.upgrd.13, i32 0 ; <<8 x i16>> [#uses=1]
+ %tmp10 = insertelement <8 x i16> poison, i16 %tmp.upgrd.13, i32 0 ; <<8 x i16>> [#uses=1]
%tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 1 ; <<8 x i16>> [#uses=1]
%tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp4, i32 2 ; <<8 x i16>> [#uses=1]
%tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 3 ; <<8 x i16>> [#uses=1]
@@ -216,7 +216,7 @@ entry:
%tmp3 = extractelement <4 x i32> %tmp2, i32 2 ; <i32> [#uses=1]
%tmp4 = extractelement <4 x i32> %tmp, i32 3 ; <i32> [#uses=1]
%tmp5 = extractelement <4 x i32> %tmp2, i32 3 ; <i32> [#uses=1]
- %tmp6 = insertelement <4 x i32> undef, i32 %tmp.upgrd.14, i32 0 ; <<4 x i32>> [#uses=1]
+ %tmp6 = insertelement <4 x i32> poison, i32 %tmp.upgrd.14, i32 0 ; <<4 x i32>> [#uses=1]
%tmp7 = insertelement <4 x i32> %tmp6, i32 %tmp3, i32 1 ; <<4 x i32>> [#uses=1]
%tmp8 = insertelement <4 x i32> %tmp7, i32 %tmp4, i32 2 ; <<4 x i32>> [#uses=1]
%tmp9 = insertelement <4 x i32> %tmp8, i32 %tmp5, i32 3 ; <<4 x i32>> [#uses=1]
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll b/llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll
new file mode 100644
index 000000000000..fad9effdd403
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/freeze.ll
@@ -0,0 +1,201 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=riscv32 -mattr=+f,+d,+zfh,+m,+v -global-isel -global-isel-abort=1 -verify-machineinstrs < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+zfh,+m,+v -global-isel -global-isel-abort=1 -verify-machineinstrs < %s 2>&1 | FileCheck %s --check-prefixes=CHECK,RV64
+
+define i32 @freeze_int(i32 %x) {
+; RV32-LABEL: freeze_int:
+; RV32: # %bb.0:
+; RV32-NEXT: mul a0, a0, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_int:
+; RV64: # %bb.0:
+; RV64-NEXT: mulw a0, a0, a0
+; RV64-NEXT: ret
+ %y1 = freeze i32 %x
+ %t1 = mul i32 %y1, %y1
+ ret i32 %t1
+}
+
+define i5 @freeze_int2(i5 %x) {
+; RV32-LABEL: freeze_int2:
+; RV32: # %bb.0:
+; RV32-NEXT: mul a0, a0, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_int2:
+; RV64: # %bb.0:
+; RV64-NEXT: mulw a0, a0, a0
+; RV64-NEXT: ret
+ %y1 = freeze i5 %x
+ %t1 = mul i5 %y1, %y1
+ ret i5 %t1
+}
+
+define float @freeze_float(float %x) {
+; CHECK-LABEL: freeze_float:
+; CHECK: # %bb.0:
+; CHECK-NEXT: fadd.s fa0, fa0, fa0
+; CHECK-NEXT: ret
+ %y1 = freeze float %x
+ %t1 = fadd float %y1, %y1
+ ret float %t1
+}
+
+define double @freeze_double(double %x) nounwind {
+; RV32-LABEL: freeze_double:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: fsd fa0, 8(sp)
+; RV32-NEXT: lw a0, 8(sp)
+; RV32-NEXT: lw a1, 12(sp)
+; RV32-NEXT: sw a0, 8(sp)
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: fld fa5, 8(sp)
+; RV32-NEXT: fadd.d fa0, fa5, fa5
+; RV32-NEXT: addi sp, sp, 16
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_double:
+; RV64: # %bb.0:
+; RV64-NEXT: fadd.d fa0, fa0, fa0
+; RV64-NEXT: ret
+ %y1 = freeze double %x
+ %t1 = fadd double %y1, %y1
+ ret double %t1
+}
+
+define void @freeze_half(ptr %p) {
+; CHECK-LABEL: freeze_half:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lh a1, 0(a0)
+; CHECK-NEXT: sh a1, 0(a0)
+; CHECK-NEXT: ret
+ %x = load half, ptr %p
+ %y1 = freeze half %x
+ store half %y1, ptr %p
+ ret void
+}
+
+define <vscale x 2 x i32> @freeze_ivec(<vscale x 2 x i32> %x) {
+; CHECK-LABEL: freeze_ivec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret
+ %y = freeze <vscale x 2 x i32> %x
+ ret <vscale x 2 x i32> %y
+}
+
+define <vscale x 2 x float> @freeze_fvec(<vscale x 2 x float> %x) {
+; CHECK-LABEL: freeze_fvec:
+; CHECK: # %bb.0:
+; CHECK-NEXT: ret
+ %y = freeze <vscale x 2 x float> %x
+ ret <vscale x 2 x float> %y
+}
+
+define ptr @freeze_ptr(ptr %x) {
+; CHECK-LABEL: freeze_ptr:
+; CHECK: # %bb.0:
+; CHECK-NEXT: addi a0, a0, 4
+; CHECK-NEXT: ret
+ %y1 = freeze ptr %x
+ %t1 = getelementptr i8, ptr %y1, i64 4
+ ret ptr %t1
+}
+
+%struct.T = type { i32, i32 }
+
+define i32 @freeze_struct(ptr %p) {
+; RV32-LABEL: freeze_struct:
+; RV32: # %bb.0:
+; RV32-NEXT: lw a1, 0(a0)
+; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_struct:
+; RV64: # %bb.0:
+; RV64-NEXT: lw a1, 0(a0)
+; RV64-NEXT: lw a0, 4(a0)
+; RV64-NEXT: addw a0, a1, a0
+; RV64-NEXT: ret
+ %s = load %struct.T, ptr %p
+ %y1 = freeze %struct.T %s
+ %v1 = extractvalue %struct.T %y1, 0
+ %v2 = extractvalue %struct.T %y1, 1
+ %t1 = add i32 %v1, %v2
+ ret i32 %t1
+}
+
+define i32 @freeze_anonstruct(ptr %p) {
+; RV32-LABEL: freeze_anonstruct:
+; RV32: # %bb.0:
+; RV32-NEXT: lw a1, 0(a0)
+; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_anonstruct:
+; RV64: # %bb.0:
+; RV64-NEXT: lw a1, 0(a0)
+; RV64-NEXT: lw a0, 4(a0)
+; RV64-NEXT: addw a0, a1, a0
+; RV64-NEXT: ret
+ %s = load {i32, i32}, ptr %p
+ %y1 = freeze {i32, i32} %s
+ %v1 = extractvalue {i32, i32} %y1, 0
+ %v2 = extractvalue {i32, i32} %y1, 1
+ %t1 = add i32 %v1, %v2
+ ret i32 %t1
+}
+
+define i32 @freeze_anonstruct2(ptr %p) {
+; RV32-LABEL: freeze_anonstruct2:
+; RV32: # %bb.0:
+; RV32-NEXT: lh a1, 4(a0)
+; RV32-NEXT: lw a0, 0(a0)
+; RV32-NEXT: lui a2, 16
+; RV32-NEXT: addi a2, a2, -1
+; RV32-NEXT: and a1, a1, a2
+; RV32-NEXT: add a0, a0, a1
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_anonstruct2:
+; RV64: # %bb.0:
+; RV64-NEXT: lh a1, 4(a0)
+; RV64-NEXT: lw a0, 0(a0)
+; RV64-NEXT: lui a2, 16
+; RV64-NEXT: addi a2, a2, -1
+; RV64-NEXT: and a1, a1, a2
+; RV64-NEXT: addw a0, a0, a1
+; RV64-NEXT: ret
+ %s = load {i32, i16}, ptr %p
+ %y1 = freeze {i32, i16} %s
+ %v1 = extractvalue {i32, i16} %y1, 0
+ %v2 = extractvalue {i32, i16} %y1, 1
+ %z2 = zext i16 %v2 to i32
+ %t1 = add i32 %v1, %z2
+ ret i32 %t1
+}
+
+define i32 @freeze_array(ptr %p) nounwind {
+; RV32-LABEL: freeze_array:
+; RV32: # %bb.0:
+; RV32-NEXT: lw a1, 0(a0)
+; RV32-NEXT: lw a0, 4(a0)
+; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: freeze_array:
+; RV64: # %bb.0:
+; RV64-NEXT: lw a1, 0(a0)
+; RV64-NEXT: lw a0, 4(a0)
+; RV64-NEXT: addw a0, a1, a0
+; RV64-NEXT: ret
+ %s = load [2 x i32], ptr %p
+ %y1 = freeze [2 x i32] %s
+ %v1 = extractvalue [2 x i32] %y1, 0
+ %v2 = extractvalue [2 x i32] %y1, 1
+ %t1 = add i32 %v1, %v2
+ ret i32 %t1
+}
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv32.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv32.mir
new file mode 100644
index 000000000000..4217910dc506
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv32.mir
@@ -0,0 +1,62 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv32 -mattr=+f,+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: freeze_i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $x10
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $x10 = COPY [[FREEZE]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s32) = COPY $x10
+ %2:_(s32) = G_FREEZE %1
+ $x10 = COPY %2(s32)
+ PseudoRET implicit $x10
+
+...
+---
+name: freeze_f32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_f32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $f10_f = COPY [[FREEZE]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $f10_f
+ %1:_(s32) = COPY $f10_f
+ %2:_(s32) = G_FREEZE %1
+ $f10_f = COPY %2(s32)
+ PseudoRET implicit $f10_f
+
+...
+---
+name: freeze_nxv2i1
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_nxv2i1
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<vscale x 2 x s1>) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $v8 = COPY [[FREEZE]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v8
+ %2:_(<vscale x 2 x s1>) = G_FREEZE %1
+ $v8 = COPY %2(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: freeze_nxv2i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_nxv2i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<vscale x 2 x s32>) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $v8 = COPY [[FREEZE]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ %2:_(<vscale x 2 x s32>) = G_FREEZE %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv64.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv64.mir
new file mode 100644
index 000000000000..355e22591588
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/legalize-freeze-rv64.mir
@@ -0,0 +1,96 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+# RUN: llc -mtriple=riscv64 -mattr=+f,+v -run-pass=legalizer %s -o - | FileCheck %s
+---
+name: freeze_i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[TRUNC:%[0-9]+]]:_(s32) = G_TRUNC [[COPY]](s64)
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[TRUNC]]
+ ; CHECK-NEXT: [[ANYEXT:%[0-9]+]]:_(s64) = G_ANYEXT [[FREEZE]](s32)
+ ; CHECK-NEXT: $x10 = COPY [[ANYEXT]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s64) = COPY $x10
+ %2:_(s32) = G_TRUNC %1(s64)
+ %3:_(s32) = G_FREEZE %2
+ %4:_(s64) = G_ANYEXT %3(s32)
+ $x10 = COPY %4(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: freeze_f32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_f32
+ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $f10_f
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $f10_f = COPY [[FREEZE]](s32)
+ ; CHECK-NEXT: PseudoRET implicit $f10_f
+ %1:_(s32) = COPY $f10_f
+ %2:_(s32) = G_FREEZE %1
+ $f10_f = COPY %2(s32)
+ PseudoRET implicit $f10_f
+
+...
+---
+name: freeze_i64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(s64) = COPY $x10
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(s64) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $x10 = COPY [[FREEZE]](s64)
+ ; CHECK-NEXT: PseudoRET implicit $x10
+ %1:_(s64) = COPY $x10
+ %2:_(s64) = G_FREEZE %1
+ $x10 = COPY %2(s64)
+ PseudoRET implicit $x10
+
+...
+---
+name: freeze_nxv2i1
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_nxv2i1
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s1>) = COPY $v8
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<vscale x 2 x s1>) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $v8 = COPY [[FREEZE]](<vscale x 2 x s1>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s1>) = COPY $v8
+ %2:_(<vscale x 2 x s1>) = G_FREEZE %1
+ $v8 = COPY %2(<vscale x 2 x s1>)
+ PseudoRET implicit $v8
+
+...
+---
+name: freeze_nxv2i32
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_nxv2i32
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<vscale x 2 x s32>) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $v8 = COPY [[FREEZE]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ %2:_(<vscale x 2 x s32>) = G_FREEZE %1
+ $v8 = COPY %2(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: freeze_nxv2i64
+body: |
+ bb.0.entry:
+ ; CHECK-LABEL: name: freeze_nxv2i64
+ ; CHECK: [[COPY:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8
+ ; CHECK-NEXT: [[FREEZE:%[0-9]+]]:_(<vscale x 2 x s64>) = G_FREEZE [[COPY]]
+ ; CHECK-NEXT: $v8m2 = COPY [[FREEZE]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %1:_(<vscale x 2 x s64>) = COPY $v8
+ %2:_(<vscale x 2 x s64>) = G_FREEZE %1
+ $v8m2 = COPY %2(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
diff --git a/llvm/test/CodeGen/RISCV/O0-pipeline.ll b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
index 3aaa5dc03a7d..e4abc93d1a8a 100644
--- a/llvm/test/CodeGen/RISCV/O0-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O0-pipeline.ll
@@ -44,14 +44,12 @@
; CHECK-NEXT: RISC-V Insert Write VXRM Pass
; CHECK-NEXT: Init Undef Pass
; CHECK-NEXT: Eliminate PHI nodes for register allocation
-; CHECK-NEXT: MachineDominator Tree Construction
-; CHECK-NEXT: Slot index numbering
-; CHECK-NEXT: Live Interval Analysis
-; CHECK-NEXT: RISC-V Insert VSETVLI pass
; CHECK-NEXT: Two-Address instruction pass
; CHECK-NEXT: Fast Register Allocator
+; CHECK-NEXT: MachineDominator Tree Construction
; CHECK-NEXT: Slot index numbering
; CHECK-NEXT: Live Interval Analysis
+; CHECK-NEXT: RISC-V Insert VSETVLI pass
; CHECK-NEXT: RISC-V Coalesce VSETVLI pass
; CHECK-NEXT: Fast Register Allocator
; CHECK-NEXT: Remove Redundant DEBUG_VALUE analysis
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index 52634b2a8162..0528b00d408b 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -128,7 +128,6 @@
; CHECK-NEXT: Slot index numbering
; CHECK-NEXT: Live Interval Analysis
; CHECK-NEXT: Register Coalescer
-; CHECK-NEXT: RISC-V Insert VSETVLI pass
; CHECK-NEXT: Rename Disconnected Subregister Components
; CHECK-NEXT: Machine Instruction Scheduler
; CHECK-NEXT: Machine Block Frequency Analysis
@@ -142,6 +141,7 @@
; CHECK-NEXT: Machine Optimization Remark Emitter
; CHECK-NEXT: Greedy Register Allocator
; CHECK-NEXT: Virtual Register Rewriter
+; CHECK-NEXT: RISC-V Insert VSETVLI pass
; CHECK-NEXT: RISC-V Coalesce VSETVLI pass
; CHECK-NEXT: RISC-V Dead register definitions
; CHECK-NEXT: Virtual Register Map
diff --git a/llvm/test/CodeGen/RISCV/attributes.ll b/llvm/test/CodeGen/RISCV/attributes.ll
index 953ed5ee3795..a1eb17956b82 100644
--- a/llvm/test/CodeGen/RISCV/attributes.ll
+++ b/llvm/test/CodeGen/RISCV/attributes.ll
@@ -75,6 +75,8 @@
; RUN: llc -mtriple=riscv32 -mattr=+xtheadmemidx %s -o - | FileCheck --check-prefix=RV32XTHEADMEMIDX %s
; RUN: llc -mtriple=riscv32 -mattr=+xtheadmempair %s -o - | FileCheck --check-prefix=RV32XTHEADMEMPAIR %s
; RUN: llc -mtriple=riscv32 -mattr=+xtheadsync %s -o - | FileCheck --check-prefix=RV32XTHEADSYNC %s
+; RUN: llc -mtriple=riscv32 -mattr=+zaamo %s -o - | FileCheck --check-prefix=RV32ZAAMO %s
+; RUN: llc -mtriple=riscv32 -mattr=+zalrsc %s -o - | FileCheck --check-prefix=RV32ZALRSC %s
; RUN: llc -mtriple=riscv32 -mattr=+zca %s -o - | FileCheck --check-prefixes=CHECK,RV32ZCA %s
; RUN: llc -mtriple=riscv32 -mattr=+zcb %s -o - | FileCheck --check-prefixes=CHECK,RV32ZCB %s
; RUN: llc -mtriple=riscv32 -mattr=+zcd %s -o - | FileCheck --check-prefixes=CHECK,RV32ZCD %s
@@ -112,10 +114,8 @@
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV32ZFBFMIN %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zvfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV32ZVFBFMIN %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zvfbfwma %s -o - | FileCheck --check-prefixes=CHECK,RV32ZVFBFWMA %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zaamo %s -o - | FileCheck --check-prefix=RV32ZAAMO %s
; RUN: llc -mtriple=riscv32 -mattr=+a,zacas %s -o - | FileCheck --check-prefix=RV32ZACAS %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zalasr %s -o - | FileCheck --check-prefix=RV32ZALASR %s
-; RUN: llc -mtriple=riscv32 -mattr=+experimental-zalrsc %s -o - | FileCheck --check-prefix=RV32ZALRSC %s
; RUN: llc -mtriple=riscv32 -mattr=+zama16b %s -o - | FileCheck --check-prefixes=CHECK,RV32ZAMA16B %s
; RUN: llc -mtriple=riscv32 -mattr=+experimental-zicfilp %s -o - | FileCheck --check-prefix=RV32ZICFILP %s
; RUN: llc -mtriple=riscv32 -mattr=+a,+experimental-zabha %s -o - | FileCheck --check-prefix=RV32ZABHA %s
@@ -205,6 +205,8 @@
; RUN: llc -mtriple=riscv64 -mattr=+zama16b %s -o - | FileCheck --check-prefixes=CHECK,RV64ZAMA16B %s
; RUN: llc -mtriple=riscv64 -mattr=+zawrs %s -o - | FileCheck --check-prefixes=CHECK,RV64ZAWRS %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-ztso %s -o - | FileCheck --check-prefixes=CHECK,RV64ZTSO %s
+; RUN: llc -mtriple=riscv64 -mattr=+zaamo %s -o - | FileCheck --check-prefix=RV64ZAAMO %s
+; RUN: llc -mtriple=riscv64 -mattr=+zalrsc %s -o - | FileCheck --check-prefix=RV64ZALRSC %s
; RUN: llc -mtriple=riscv64 -mattr=+zca %s -o - | FileCheck --check-prefixes=CHECK,RV64ZCA %s
; RUN: llc -mtriple=riscv64 -mattr=+zcb %s -o - | FileCheck --check-prefixes=CHECK,RV64ZCB %s
; RUN: llc -mtriple=riscv64 -mattr=+zcd %s -o - | FileCheck --check-prefixes=CHECK,RV64ZCD %s
@@ -245,10 +247,8 @@
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV64ZFBFMIN %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zvfbfmin %s -o - | FileCheck --check-prefixes=CHECK,RV64ZVFBFMIN %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zvfbfwma %s -o - | FileCheck --check-prefixes=CHECK,RV64ZVFBFWMA %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zaamo %s -o - | FileCheck --check-prefix=RV64ZAAMO %s
; RUN: llc -mtriple=riscv64 -mattr=+a,zacas %s -o - | FileCheck --check-prefix=RV64ZACAS %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zalasr %s -o - | FileCheck --check-prefix=RV64ZALASR %s
-; RUN: llc -mtriple=riscv64 -mattr=+experimental-zalrsc %s -o - | FileCheck --check-prefix=RV64ZALRSC %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-zicfilp %s -o - | FileCheck --check-prefix=RV64ZICFILP %s
; RUN: llc -mtriple=riscv64 -mattr=+a,+experimental-zabha %s -o - | FileCheck --check-prefix=RV64ZABHA %s
; RUN: llc -mtriple=riscv64 -mattr=+experimental-ssnpm %s -o - | FileCheck --check-prefix=RV64SSNPM %s
@@ -347,6 +347,8 @@
; RV32XTHEADMEMIDX: .attribute 5, "rv32i2p1_xtheadmemidx1p0"
; RV32XTHEADMEMPAIR: .attribute 5, "rv32i2p1_xtheadmempair1p0"
; RV32XTHEADSYNC: .attribute 5, "rv32i2p1_xtheadsync1p0"
+; RV32ZAAMO: .attribute 5, "rv32i2p1_zaamo1p0"
+; RV32ZALRSC: .attribute 5, "rv32i2p1_zalrsc1p0"
; RV32ZCA: .attribute 5, "rv32i2p1_zca1p0"
; RV32ZCB: .attribute 5, "rv32i2p1_zca1p0_zcb1p0"
; RV32ZCD: .attribute 5, "rv32i2p1_f2p2_d2p2_zicsr2p0_zca1p0_zcd1p0"
@@ -384,10 +386,8 @@
; RV32ZFBFMIN: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zfbfmin1p0"
; RV32ZVFBFMIN: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zve32f1p0_zve32x1p0_zvfbfmin1p0_zvl32b1p0"
; RV32ZVFBFWMA: .attribute 5, "rv32i2p1_f2p2_zicsr2p0_zfbfmin1p0_zve32f1p0_zve32x1p0_zvfbfmin1p0_zvfbfwma1p0_zvl32b1p0"
-; RV32ZAAMO: .attribute 5, "rv32i2p1_zaamo0p2"
; RV32ZACAS: .attribute 5, "rv32i2p1_a2p1_zacas1p0"
; RV32ZALASR: .attribute 5, "rv32i2p1_zalasr0p1"
-; RV32ZALRSC: .attribute 5, "rv32i2p1_zalrsc0p2"
; RV32ZAMA16B: .attribute 5, "rv32i2p1_zama16b1p0"
; RV32ZICFILP: .attribute 5, "rv32i2p1_zicfilp0p4"
; RV32ZABHA: .attribute 5, "rv32i2p1_a2p1_zabha1p0"
@@ -476,6 +476,8 @@
; RV64XTHEADSYNC: .attribute 5, "rv64i2p1_xtheadsync1p0"
; RV64XTHEADVDOT: .attribute 5, "rv64i2p1_f2p2_d2p2_v1p0_zicsr2p0_zve32f1p0_zve32x1p0_zve64d1p0_zve64f1p0_zve64x1p0_zvl128b1p0_zvl32b1p0_zvl64b1p0_xtheadvdot1p0"
; RV64ZTSO: .attribute 5, "rv64i2p1_ztso0p1"
+; RV64ZAAMO: .attribute 5, "rv64i2p1_zaamo1p0"
+; RV64ZALRSC: .attribute 5, "rv64i2p1_zalrsc1p0"
; RV64ZCA: .attribute 5, "rv64i2p1_zca1p0"
; RV64ZCB: .attribute 5, "rv64i2p1_zca1p0_zcb1p0"
; RV64ZCD: .attribute 5, "rv64i2p1_f2p2_d2p2_zicsr2p0_zca1p0_zcd1p0"
@@ -516,10 +518,8 @@
; RV64ZFBFMIN: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zfbfmin1p0"
; RV64ZVFBFMIN: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zve32f1p0_zve32x1p0_zvfbfmin1p0_zvl32b1p0"
; RV64ZVFBFWMA: .attribute 5, "rv64i2p1_f2p2_zicsr2p0_zfbfmin1p0_zve32f1p0_zve32x1p0_zvfbfmin1p0_zvfbfwma1p0_zvl32b1p0"
-; RV64ZAAMO: .attribute 5, "rv64i2p1_zaamo0p2"
; RV64ZACAS: .attribute 5, "rv64i2p1_a2p1_zacas1p0"
; RV64ZALASR: .attribute 5, "rv64i2p1_zalasr0p1"
-; RV64ZALRSC: .attribute 5, "rv64i2p1_zalrsc0p2"
; RV64ZICFILP: .attribute 5, "rv64i2p1_zicfilp0p4"
; RV64ZABHA: .attribute 5, "rv64i2p1_a2p1_zabha1p0"
; RV64SSNPM: .attribute 5, "rv64i2p1_ssnpm0p8"
diff --git a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
index 83a4f63add33..eb6ac985287a 100644
--- a/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/early-clobber-tied-def-subreg-liveness.ll
@@ -24,36 +24,36 @@ define void @_Z3foov() {
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_49)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_49)
; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
+; CHECK-NEXT: vle16.v v10, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_48)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_48)
-; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs1r.v v10, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_46)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_46)
-; CHECK-NEXT: vle16.v v10, (a0)
+; CHECK-NEXT: vle16.v v12, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_45)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_45)
-; CHECK-NEXT: vle16.v v12, (a0)
+; CHECK-NEXT: vle16.v v14, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs2r.v v10, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs2r.v v12, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: add a0, a0, a1
; CHECK-NEXT: vs2r.v v14, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: add a0, a0, a1
+; CHECK-NEXT: vs2r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: #APP
; CHECK-NEXT: #NO_APP
-; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_40)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_40)
+; CHECK-NEXT: vsetivli zero, 2, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: lui a0, %hi(.L__const._Z3foov.var_44)
; CHECK-NEXT: addi a0, a0, %lo(.L__const._Z3foov.var_44)
@@ -71,12 +71,12 @@ define void @_Z3foov() {
; CHECK-NEXT: lui a0, 1048572
; CHECK-NEXT: addi a0, a0, 928
; CHECK-NEXT: vmsbc.vx v0, v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, tu, mu
; CHECK-NEXT: vsext.vf2 v10, v8, v0.t
; CHECK-NEXT: lui a0, %hi(var_47)
; CHECK-NEXT: addi a0, a0, %lo(var_47)
diff --git a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
index ea8feef33298..6009a6c7e138 100644
--- a/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
+++ b/llvm/test/CodeGen/RISCV/intrinsic-cttz-elts-vscale.ll
@@ -75,18 +75,18 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
; RV32-NEXT: sw a0, 16(sp)
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vsetvli a3, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a2), zero
-; RV32-NEXT: vid.v v8
+; RV32-NEXT: vlse64.v v8, (a2), zero
+; RV32-NEXT: vid.v v16
; RV32-NEXT: li a2, -1
-; RV32-NEXT: vmadd.vx v8, a2, v16
-; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT: vmadd.vx v16, a2, v8
; RV32-NEXT: addi a2, sp, 32
-; RV32-NEXT: vl2r.v v16, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vmsne.vi v0, v16, 0
+; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV32-NEXT: vmsne.vi v0, v8, 0
; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV32-NEXT: vmv.v.i v16, 0
-; RV32-NEXT: vmerge.vim v16, v16, -1, v0
-; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: vmerge.vim v8, v8, -1, v0
+; RV32-NEXT: vand.vv v8, v16, v8
; RV32-NEXT: vredmaxu.vs v8, v8, v8
; RV32-NEXT: vmv.x.s a2, v8
; RV32-NEXT: sltu a3, a0, a2
@@ -108,15 +108,15 @@ define i64 @ctz_nxv8i1_no_range(<vscale x 8 x i16> %a) {
; RV64: # %bb.0:
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV64-NEXT: vmv.v.x v24, a0
-; RV64-NEXT: vid.v v16
+; RV64-NEXT: vmv.v.x v16, a0
+; RV64-NEXT: vid.v v24
; RV64-NEXT: li a1, -1
-; RV64-NEXT: vmadd.vx v16, a1, v24
+; RV64-NEXT: vmadd.vx v24, a1, v16
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64-NEXT: vmsne.vi v0, v8, 0
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
-; RV64-NEXT: vmerge.vvm v8, v8, v16, v0
+; RV64-NEXT: vmerge.vvm v8, v8, v24, v0
; RV64-NEXT: vredmaxu.vs v8, v8, v8
; RV64-NEXT: vmv.x.s a1, v8
; RV64-NEXT: sub a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/pr69586.ll b/llvm/test/CodeGen/RISCV/pr69586.ll
index 15daf2c57790..7084c04805be 100644
--- a/llvm/test/CodeGen/RISCV/pr69586.ll
+++ b/llvm/test/CodeGen/RISCV/pr69586.ll
@@ -927,258 +927,258 @@ define void @test(ptr %0, ptr %1, i64 %2) {
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v10, v12
-; REMAT-NEXT: vle32.v v12, (a2)
+; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: li a2, 11
; REMAT-NEXT: slli a2, a2, 10
; REMAT-NEXT: add a2, a0, a2
-; REMAT-NEXT: vle32.v v16, (a2)
+; REMAT-NEXT: vle32.v v26, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v8, v14
-; REMAT-NEXT: vle32.v v10, (a2)
+; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: li a2, 23
; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
-; REMAT-NEXT: vle32.v v26, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v12, v16
-; REMAT-NEXT: vle32.v v12, (a2)
-; REMAT-NEXT: lui a2, 3
-; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v28, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
-; REMAT-NEXT: li a2, 25
-; REMAT-NEXT: slli a2, a2, 9
+; REMAT-NEXT: lui a2, 3
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
-; REMAT-NEXT: li a2, 13
-; REMAT-NEXT: slli a2, a2, 10
+; REMAT-NEXT: li a2, 25
+; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
-; REMAT-NEXT: li a2, 27
-; REMAT-NEXT: slli a2, a2, 9
+; REMAT-NEXT: li a2, 13
+; REMAT-NEXT: slli a2, a2, 10
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v20, (a2)
-; REMAT-NEXT: li a2, 7
-; REMAT-NEXT: slli a2, a2, 11
+; REMAT-NEXT: li a2, 27
+; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v22, (a2)
-; REMAT-NEXT: li a2, 29
-; REMAT-NEXT: slli a2, a2, 9
+; REMAT-NEXT: li a2, 7
+; REMAT-NEXT: slli a2, a2, 11
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v24, (a2)
; REMAT-NEXT: vle32.v v8, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v10, v26
-; REMAT-NEXT: li a2, 15
-; REMAT-NEXT: slli a2, a2, 10
+; REMAT-NEXT: li a2, 29
+; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v26, (a2)
; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v12, v28
-; REMAT-NEXT: li a2, 31
-; REMAT-NEXT: slli a2, a2, 9
+; REMAT-NEXT: li a2, 15
+; REMAT-NEXT: slli a2, a2, 10
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v28, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v14, v30
-; REMAT-NEXT: lui a2, 4
+; REMAT-NEXT: li a2, 31
+; REMAT-NEXT: slli a2, a2, 9
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v16, v6
; REMAT-NEXT: lui a2, 4
-; REMAT-NEXT: addiw a2, a2, 512
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v18, v4
-; REMAT-NEXT: li a2, 17
-; REMAT-NEXT: slli a2, a2, 10
+; REMAT-NEXT: lui a2, 4
+; REMAT-NEXT: addiw a2, a2, 512
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v20, v2
-; REMAT-NEXT: lui a2, 4
-; REMAT-NEXT: addiw a2, a2, 1536
+; REMAT-NEXT: li a2, 17
+; REMAT-NEXT: slli a2, a2, 10
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v20, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v22, v24
-; REMAT-NEXT: li a2, 9
-; REMAT-NEXT: slli a2, a2, 11
+; REMAT-NEXT: lui a2, 4
+; REMAT-NEXT: addiw a2, a2, 1536
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v24, (a2)
; REMAT-NEXT: vle32.v v22, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v8, v26
-; REMAT-NEXT: lui a2, 5
-; REMAT-NEXT: addiw a2, a2, -1536
+; REMAT-NEXT: li a2, 9
+; REMAT-NEXT: slli a2, a2, 11
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v26, (a2)
; REMAT-NEXT: vle32.v v8, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v10, v28
-; REMAT-NEXT: li a2, 19
-; REMAT-NEXT: slli a2, a2, 10
+; REMAT-NEXT: lui a2, 5
+; REMAT-NEXT: addiw a2, a2, -1536
; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v28, (a2)
; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v12, v30
-; REMAT-NEXT: lui ra, 5
-; REMAT-NEXT: addiw ra, ra, -512
-; REMAT-NEXT: add a2, a0, ra
+; REMAT-NEXT: li a2, 19
+; REMAT-NEXT: slli a2, a2, 10
+; REMAT-NEXT: add a2, a0, a2
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v14, v6
-; REMAT-NEXT: lui s11, 5
-; REMAT-NEXT: add a2, a0, s11
+; REMAT-NEXT: lui ra, 5
+; REMAT-NEXT: addiw ra, ra, -512
+; REMAT-NEXT: add a2, a0, ra
; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v16, v4
-; REMAT-NEXT: lui s10, 5
-; REMAT-NEXT: addiw s10, s10, 512
-; REMAT-NEXT: add a2, a0, s10
+; REMAT-NEXT: lui s11, 5
+; REMAT-NEXT: add a2, a0, s11
; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v18, v2
-; REMAT-NEXT: li s9, 21
-; REMAT-NEXT: slli s9, s9, 10
-; REMAT-NEXT: add a2, a0, s9
+; REMAT-NEXT: lui s10, 5
+; REMAT-NEXT: addiw s10, s10, 512
+; REMAT-NEXT: add a2, a0, s10
; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v20, v24
-; REMAT-NEXT: lui s8, 5
-; REMAT-NEXT: addiw s8, s8, 1536
-; REMAT-NEXT: add a2, a0, s8
+; REMAT-NEXT: li s9, 21
+; REMAT-NEXT: slli s9, s9, 10
+; REMAT-NEXT: add a2, a0, s9
; REMAT-NEXT: vle32.v v24, (a2)
; REMAT-NEXT: vle32.v v20, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v22, v26
-; REMAT-NEXT: li s7, 11
-; REMAT-NEXT: slli s7, s7, 11
-; REMAT-NEXT: add a2, a0, s7
+; REMAT-NEXT: lui s8, 5
+; REMAT-NEXT: addiw s8, s8, 1536
+; REMAT-NEXT: add a2, a0, s8
; REMAT-NEXT: vle32.v v26, (a2)
; REMAT-NEXT: vle32.v v22, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v8, v28
-; REMAT-NEXT: lui s6, 6
-; REMAT-NEXT: addiw s6, s6, -1536
-; REMAT-NEXT: add a2, a0, s6
+; REMAT-NEXT: li s7, 11
+; REMAT-NEXT: slli s7, s7, 11
+; REMAT-NEXT: add a2, a0, s7
; REMAT-NEXT: vle32.v v28, (a2)
; REMAT-NEXT: vle32.v v8, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v10, v30
-; REMAT-NEXT: li s5, 23
-; REMAT-NEXT: slli s5, s5, 10
-; REMAT-NEXT: add a2, a0, s5
+; REMAT-NEXT: lui s6, 6
+; REMAT-NEXT: addiw s6, s6, -1536
+; REMAT-NEXT: add a2, a0, s6
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v12, v6
-; REMAT-NEXT: lui s4, 6
-; REMAT-NEXT: addiw s4, s4, -512
-; REMAT-NEXT: add a2, a0, s4
+; REMAT-NEXT: li s5, 23
+; REMAT-NEXT: slli s5, s5, 10
+; REMAT-NEXT: add a2, a0, s5
; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v14, v4
-; REMAT-NEXT: lui s3, 6
-; REMAT-NEXT: add a2, a0, s3
+; REMAT-NEXT: lui s4, 6
+; REMAT-NEXT: addiw s4, s4, -512
+; REMAT-NEXT: add a2, a0, s4
; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v16, v2
-; REMAT-NEXT: lui s2, 6
-; REMAT-NEXT: addiw s2, s2, 512
-; REMAT-NEXT: add a2, a0, s2
+; REMAT-NEXT: lui s3, 6
+; REMAT-NEXT: add a2, a0, s3
; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v18, v24
-; REMAT-NEXT: li s1, 25
-; REMAT-NEXT: slli s1, s1, 10
-; REMAT-NEXT: add a2, a0, s1
+; REMAT-NEXT: lui s2, 6
+; REMAT-NEXT: addiw s2, s2, 512
+; REMAT-NEXT: add a2, a0, s2
; REMAT-NEXT: vle32.v v0, (a2)
; REMAT-NEXT: vle32.v v18, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v20, v26
-; REMAT-NEXT: lui s0, 6
-; REMAT-NEXT: addiw s0, s0, 1536
-; REMAT-NEXT: add a2, a0, s0
+; REMAT-NEXT: li s1, 25
+; REMAT-NEXT: slli s1, s1, 10
+; REMAT-NEXT: add a2, a0, s1
; REMAT-NEXT: vle32.v v26, (a2)
; REMAT-NEXT: vle32.v v20, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v22, v28
-; REMAT-NEXT: li t6, 13
-; REMAT-NEXT: slli t6, t6, 11
-; REMAT-NEXT: add a2, a0, t6
+; REMAT-NEXT: lui s0, 6
+; REMAT-NEXT: addiw s0, s0, 1536
+; REMAT-NEXT: add a2, a0, s0
; REMAT-NEXT: vle32.v v28, (a2)
; REMAT-NEXT: vle32.v v22, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v8, v30
-; REMAT-NEXT: lui t5, 7
-; REMAT-NEXT: addiw t5, t5, -1536
-; REMAT-NEXT: add a2, a0, t5
+; REMAT-NEXT: li t6, 13
+; REMAT-NEXT: slli t6, t6, 11
+; REMAT-NEXT: add a2, a0, t6
; REMAT-NEXT: vle32.v v30, (a2)
; REMAT-NEXT: vle32.v v24, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v10, v6
-; REMAT-NEXT: li t4, 27
-; REMAT-NEXT: slli t4, t4, 10
-; REMAT-NEXT: add a2, a0, t4
+; REMAT-NEXT: lui t5, 7
+; REMAT-NEXT: addiw t5, t5, -1536
+; REMAT-NEXT: add a2, a0, t5
; REMAT-NEXT: vle32.v v6, (a2)
; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v12, v4
-; REMAT-NEXT: lui t3, 7
-; REMAT-NEXT: addiw t3, t3, -512
-; REMAT-NEXT: add a2, a0, t3
+; REMAT-NEXT: li t4, 27
+; REMAT-NEXT: slli t4, t4, 10
+; REMAT-NEXT: add a2, a0, t4
; REMAT-NEXT: vle32.v v4, (a2)
; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: sf.vc.vv 3, 0, v14, v2
+; REMAT-NEXT: lui t3, 7
+; REMAT-NEXT: addiw t3, t3, -512
+; REMAT-NEXT: add a2, a0, t3
+; REMAT-NEXT: vle32.v v2, (a2)
+; REMAT-NEXT: vle32.v v14, (a2)
+; REMAT-NEXT: sf.vc.vv 3, 0, v16, v0
; REMAT-NEXT: lui t2, 7
; REMAT-NEXT: add a2, a0, t2
-; REMAT-NEXT: vle32.v v2, (a2)
+; REMAT-NEXT: vle32.v v0, (a2)
; REMAT-NEXT: vle32.v v8, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v16, v0
+; REMAT-NEXT: sf.vc.vv 3, 0, v18, v26
; REMAT-NEXT: lui t1, 7
; REMAT-NEXT: addiw t1, t1, 512
; REMAT-NEXT: add a2, a0, t1
-; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: vle32.v v16, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v18, v26
+; REMAT-NEXT: vle32.v v18, (a2)
+; REMAT-NEXT: sf.vc.vv 3, 0, v20, v28
; REMAT-NEXT: li t0, 29
; REMAT-NEXT: slli t0, t0, 10
; REMAT-NEXT: add a2, a0, t0
-; REMAT-NEXT: vle32.v v18, (a2)
+; REMAT-NEXT: vle32.v v20, (a2)
; REMAT-NEXT: vle32.v v26, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v20, v28
+; REMAT-NEXT: sf.vc.vv 3, 0, v22, v30
; REMAT-NEXT: lui a7, 7
; REMAT-NEXT: addiw a7, a7, 1536
; REMAT-NEXT: add a2, a0, a7
-; REMAT-NEXT: vle32.v v20, (a2)
+; REMAT-NEXT: vle32.v v22, (a2)
; REMAT-NEXT: vle32.v v28, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v22, v30
+; REMAT-NEXT: sf.vc.vv 3, 0, v24, v6
; REMAT-NEXT: li a6, 15
; REMAT-NEXT: slli a6, a6, 11
; REMAT-NEXT: add a2, a0, a6
-; REMAT-NEXT: vle32.v v22, (a2)
+; REMAT-NEXT: vle32.v v24, (a2)
; REMAT-NEXT: vle32.v v30, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v24, v6
+; REMAT-NEXT: sf.vc.vv 3, 0, v10, v4
; REMAT-NEXT: lui a5, 8
; REMAT-NEXT: addiw a5, a5, -1536
; REMAT-NEXT: add a2, a0, a5
-; REMAT-NEXT: vle32.v v24, (a2)
+; REMAT-NEXT: vle32.v v10, (a2)
; REMAT-NEXT: vle32.v v6, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v10, v4
+; REMAT-NEXT: sf.vc.vv 3, 0, v12, v2
; REMAT-NEXT: li a4, 31
; REMAT-NEXT: slli a4, a4, 10
; REMAT-NEXT: add a2, a0, a4
-; REMAT-NEXT: vle32.v v10, (a2)
+; REMAT-NEXT: vle32.v v12, (a2)
; REMAT-NEXT: vle32.v v4, (a2)
-; REMAT-NEXT: sf.vc.vv 3, 0, v12, v2
+; REMAT-NEXT: sf.vc.vv 3, 0, v14, v0
; REMAT-NEXT: lui a3, 8
; REMAT-NEXT: addiw a3, a3, -512
; REMAT-NEXT: add a2, a0, a3
-; REMAT-NEXT: vle32.v v12, (a2)
+; REMAT-NEXT: vle32.v v14, (a2)
; REMAT-NEXT: vle32.v v2, (a2)
; REMAT-NEXT: lui a2, 8
; REMAT-NEXT: add a0, a0, a2
; REMAT-NEXT: vle32.v v0, (a0)
-; REMAT-NEXT: sf.vc.vv 3, 0, v8, v14
-; REMAT-NEXT: sf.vc.vv 3, 0, v16, v18
-; REMAT-NEXT: sf.vc.vv 3, 0, v26, v20
-; REMAT-NEXT: sf.vc.vv 3, 0, v28, v22
-; REMAT-NEXT: sf.vc.vv 3, 0, v30, v24
-; REMAT-NEXT: sf.vc.vv 3, 0, v6, v10
-; REMAT-NEXT: sf.vc.vv 3, 0, v4, v12
+; REMAT-NEXT: sf.vc.vv 3, 0, v8, v16
+; REMAT-NEXT: sf.vc.vv 3, 0, v18, v20
+; REMAT-NEXT: sf.vc.vv 3, 0, v26, v22
+; REMAT-NEXT: sf.vc.vv 3, 0, v28, v24
+; REMAT-NEXT: sf.vc.vv 3, 0, v30, v10
+; REMAT-NEXT: sf.vc.vv 3, 0, v6, v12
+; REMAT-NEXT: sf.vc.vv 3, 0, v4, v14
; REMAT-NEXT: sf.vc.vv 3, 0, v2, v0
; REMAT-NEXT: sf.vc.v.i 2, 0, v8, 0
; REMAT-NEXT: addi a0, a1, 1024
diff --git a/llvm/test/CodeGen/RISCV/pr90730.ll b/llvm/test/CodeGen/RISCV/pr90730.ll
new file mode 100644
index 000000000000..7c3f4b43089c
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/pr90730.ll
@@ -0,0 +1,22 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc < %s -mtriple=riscv64 -mattr=+zbb | FileCheck %s
+
+define i32 @pr90730(i32 %x, i1 %y, ptr %p) {
+; CHECK-LABEL: pr90730:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: lui a1, 8
+; CHECK-NEXT: addiw a1, a1, -960
+; CHECK-NEXT: andn a0, a1, a0
+; CHECK-NEXT: sw zero, 0(a2)
+; CHECK-NEXT: ret
+entry:
+ %ext = zext i1 %y to i32
+ %xor1 = xor i32 %ext, 31817
+ %and1 = and i32 %xor1, %x
+ store i32 %and1, ptr %p, align 4
+ %v = load i32, ptr %p, align 4
+ %and2 = and i32 %v, 31808
+ %xor2 = xor i32 %and2, 31808
+ store i32 0, ptr %p, align 4
+ ret i32 %xor2
+}
diff --git a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
index 81ef6072449e..c92ba98dcc33 100644
--- a/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
+++ b/llvm/test/CodeGen/RISCV/regalloc-last-chance-recoloring-failure.ll
@@ -43,7 +43,6 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: call func
-; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
@@ -55,6 +54,7 @@ define void @last_chance_recoloring_failure() {
; CHECK-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; CHECK-NEXT: vfwsub.wv v8, v24, v16
; CHECK-NEXT: vsetvli zero, zero, e32, m8, tu, mu
; CHECK-NEXT: vfdiv.vv v8, v24, v8, v0.t
@@ -99,7 +99,6 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: addi a0, sp, 16
; SUBREGLIVENESS-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; SUBREGLIVENESS-NEXT: call func
-; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: csrr a0, vlenb
; SUBREGLIVENESS-NEXT: slli a0, a0, 3
; SUBREGLIVENESS-NEXT: add a0, sp, a0
@@ -111,6 +110,7 @@ define void @last_chance_recoloring_failure() {
; SUBREGLIVENESS-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
; SUBREGLIVENESS-NEXT: addi a0, sp, 16
; SUBREGLIVENESS-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: vfwsub.wv v8, v24, v16
; SUBREGLIVENESS-NEXT: vsetvli zero, zero, e32, m8, tu, mu
; SUBREGLIVENESS-NEXT: vfdiv.vv v8, v24, v8, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
index eb74e2d302f1..05d6716e4719 100644
--- a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll
@@ -590,13 +590,12 @@ define <vscale x 16 x i64> @vp_abs_nxv16i64(<vscale x 16 x i64> %va, <vscale x 1
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB46_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: addi a0, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
index e578aada5a9c..91f700ef9680 100644
--- a/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/access-fixed-objects-by-rvv.ll
@@ -39,9 +39,9 @@ define <vscale x 1 x i64> @access_fixed_and_vector_objects(ptr %val) {
; RV64IV-NEXT: addi a0, sp, 8
; RV64IV-NEXT: vl1re64.v v8, (a0)
; RV64IV-NEXT: addi a0, sp, 528
-; RV64IV-NEXT: ld a1, 520(sp)
; RV64IV-NEXT: vl1re64.v v9, (a0)
-; RV64IV-NEXT: vsetvli zero, a1, e64, m1, ta, ma
+; RV64IV-NEXT: ld a0, 520(sp)
+; RV64IV-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV64IV-NEXT: vadd.vv v8, v8, v9
; RV64IV-NEXT: csrr a0, vlenb
; RV64IV-NEXT: slli a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
index 139579b3d2a3..9cb3991f31f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/active_lane_mask.ll
@@ -103,9 +103,9 @@ define <8 x i1> @fv8(ptr %p, i64 %index, i64 %tc) {
define <32 x i1> @fv32(ptr %p, i64 %index, i64 %tc) {
; CHECK-LABEL: fv32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vsaddu.vx v16, v16, a1
@@ -124,31 +124,30 @@ define <64 x i1> @fv64(ptr %p, i64 %index, i64 %tc) {
; CHECK-LABEL: fv64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vsaddu.vx v8, v8, a1
+; CHECK-NEXT: vmsltu.vx v0, v8, a2
; CHECK-NEXT: lui a0, %hi(.LCPI9_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_0)
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vid.v v16
-; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v0, v16, a2
-; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 2
; CHECK-NEXT: lui a0, %hi(.LCPI9_1)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_1)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: vsext.vf8 v16, v8
-; CHECK-NEXT: vsaddu.vx v8, v16, a1
-; CHECK-NEXT: vmsltu.vx v16, v8, a2
-; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v16, 4
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v8, v16, a2
+; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: lui a0, %hi(.LCPI9_2)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI9_2)
+; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vmsltu.vx v10, v16, a2
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v8, 2
+; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v10, 4
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v8
+; CHECK-NEXT: vsext.vf8 v16, v9
; CHECK-NEXT: vsaddu.vx v8, v16, a1
; CHECK-NEXT: vmsltu.vx v16, v8, a2
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
@@ -174,51 +173,48 @@ define <128 x i1> @fv128(ptr %p, i64 %index, i64 %tc) {
; CHECK-NEXT: vsext.vf8 v16, v9
; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: vmsltu.vx v8, v16, a2
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: lui a0, %hi(.LCPI10_2)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_2)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v9, v16, a2
-; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: lui a0, %hi(.LCPI10_3)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_3)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vle8.v v11, (a0)
; CHECK-NEXT: vsext.vf8 v16, v9
; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: vmsltu.vx v9, v16, a2
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 6
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: lui a0, %hi(.LCPI10_4)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_4)
-; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsext.vf8 v16, v11
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v11, v16, a2
; CHECK-NEXT: vid.v v16
; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: vmsltu.vx v0, v16, a2
-; CHECK-NEXT: vsext.vf8 v16, v9
-; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v9, v16, a2
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v9, 2
+; CHECK-NEXT: lui a0, %hi(.LCPI10_4)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_4)
+; CHECK-NEXT: vle8.v v12, (a0)
; CHECK-NEXT: lui a0, %hi(.LCPI10_5)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_5)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle8.v v9, (a0)
-; CHECK-NEXT: vsext.vf8 v16, v9
+; CHECK-NEXT: vle8.v v13, (a0)
+; CHECK-NEXT: vsext.vf8 v16, v12
; CHECK-NEXT: vsaddu.vx v16, v16, a1
-; CHECK-NEXT: vmsltu.vx v9, v16, a2
+; CHECK-NEXT: vmsltu.vx v12, v16, a2
+; CHECK-NEXT: vsext.vf8 v16, v13
+; CHECK-NEXT: vsaddu.vx v16, v16, a1
+; CHECK-NEXT: vmsltu.vx v13, v16, a2
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v9, 4
+; CHECK-NEXT: vslideup.vi v8, v9, 4
; CHECK-NEXT: lui a0, %hi(.LCPI10_6)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI10_6)
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v9, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vi v8, v11, 6
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v12, 2
+; CHECK-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v13, 4
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vsext.vf8 v16, v9
; CHECK-NEXT: vsaddu.vx v16, v16, a1
; CHECK-NEXT: vmsltu.vx v9, v16, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
index 879dff4a6e49..5217148ba4f4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bitreverse-vp.ll
@@ -1449,27 +1449,27 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64(<vscale x 1 x i64> %va, <vscale
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v10, v10, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11, v0.t
+; RV32-NEXT: vand.vx v11, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v11, v11, 24, v0.t
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
; RV32-NEXT: vsll.vi v12, v12, 8, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT: vor.vv v11, v11, v12, v0.t
+; RV32-NEXT: vor.vv v9, v9, v11, v0.t
+; RV32-NEXT: vsrl.vx v11, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a3, v0.t
; RV32-NEXT: vand.vx v12, v12, a2, v0.t
-; RV32-NEXT: vor.vv v10, v12, v10, v0.t
+; RV32-NEXT: vor.vv v11, v12, v11, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v11, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
-; RV32-NEXT: vor.vv v8, v8, v10, v0.t
+; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
; RV32-NEXT: lui a1, 61681
@@ -1590,27 +1590,27 @@ define <vscale x 1 x i64> @vp_bitreverse_nxv1i64_unmasked(<vscale x 1 x i64> %va
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3
; RV32-NEXT: vor.vv v9, v9, v10
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4
-; RV32-NEXT: vsll.vi v10, v10, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11
+; RV32-NEXT: vand.vx v11, v8, a4
+; RV32-NEXT: vsll.vi v11, v11, 24
+; RV32-NEXT: vand.vv v12, v8, v10
; RV32-NEXT: vsll.vi v12, v12, 8
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: vsrl.vx v10, v8, a1
+; RV32-NEXT: vor.vv v11, v11, v12
+; RV32-NEXT: vor.vv v9, v9, v11
+; RV32-NEXT: vsrl.vx v11, v8, a1
; RV32-NEXT: vsrl.vx v12, v8, a3
; RV32-NEXT: vand.vx v12, v12, a2
-; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vor.vv v11, v12, v11
; RV32-NEXT: vsrl.vi v12, v8, 24
; RV32-NEXT: vand.vx v12, v12, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v11
+; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: vor.vv v8, v8, v12
-; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vor.vv v8, v8, v11
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: vsrl.vi v9, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -1733,27 +1733,27 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64(<vscale x 2 x i64> %va, <vscale
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3, v0.t
; RV32-NEXT: vor.vv v10, v10, v12, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v12, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14, v0.t
+; RV32-NEXT: vand.vx v14, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v14, v14, 24, v0.t
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
-; RV32-NEXT: vor.vv v12, v12, v16, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t
+; RV32-NEXT: vor.vv v14, v14, v16, v0.t
+; RV32-NEXT: vor.vv v10, v10, v14, v0.t
+; RV32-NEXT: vsrl.vx v14, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v12, v16, v12, v0.t
+; RV32-NEXT: vor.vv v14, v16, v14, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 24, v0.t
; RV32-NEXT: vand.vx v16, v16, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v14, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vor.vv v8, v8, v12, v0.t
+; RV32-NEXT: vor.vv v8, v8, v14, v0.t
; RV32-NEXT: vor.vv v8, v10, v8, v0.t
; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
; RV32-NEXT: lui a1, 61681
@@ -1874,27 +1874,27 @@ define <vscale x 2 x i64> @vp_bitreverse_nxv2i64_unmasked(<vscale x 2 x i64> %va
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3
; RV32-NEXT: vor.vv v10, v10, v12
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4
-; RV32-NEXT: vsll.vi v12, v12, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14
+; RV32-NEXT: vand.vx v14, v8, a4
+; RV32-NEXT: vsll.vi v14, v14, 24
+; RV32-NEXT: vand.vv v16, v8, v12
; RV32-NEXT: vsll.vi v16, v16, 8
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vsrl.vx v12, v8, a1
+; RV32-NEXT: vor.vv v14, v14, v16
+; RV32-NEXT: vor.vv v10, v10, v14
+; RV32-NEXT: vsrl.vx v14, v8, a1
; RV32-NEXT: vsrl.vx v16, v8, a3
; RV32-NEXT: vand.vx v16, v16, a2
-; RV32-NEXT: vor.vv v12, v16, v12
+; RV32-NEXT: vor.vv v14, v16, v14
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v14
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: vsrl.vi v10, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -2017,13 +2017,13 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64(<vscale x 4 x i64> %va, <vscale
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3, v0.t
; RV32-NEXT: vor.vv v16, v12, v16, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v20, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vx v20, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v20, v20, 24, v0.t
; RV32-NEXT: vand.vv v24, v8, v12, v0.t
; RV32-NEXT: vsll.vi v24, v24, 8, v0.t
; RV32-NEXT: vor.vv v20, v20, v24, v0.t
@@ -2158,27 +2158,27 @@ define <vscale x 4 x i64> @vp_bitreverse_nxv4i64_unmasked(<vscale x 4 x i64> %va
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3
; RV32-NEXT: vor.vv v12, v12, v16
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v16, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v20, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v20
+; RV32-NEXT: vand.vx v20, v8, a4
+; RV32-NEXT: vsll.vi v20, v20, 24
+; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v16, v16, v24
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v20, v20, v24
+; RV32-NEXT: vor.vv v12, v12, v20
+; RV32-NEXT: vsrl.vx v20, v8, a1
; RV32-NEXT: vsrl.vx v24, v8, a3
; RV32-NEXT: vand.vx v24, v24, a2
-; RV32-NEXT: vor.vv v16, v24, v16
+; RV32-NEXT: vor.vv v20, v24, v20
; RV32-NEXT: vsrl.vi v24, v8, 24
; RV32-NEXT: vand.vx v24, v24, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v20
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v20
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: vsrl.vi v12, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -2311,20 +2311,23 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: slli a5, a5, 4
@@ -2337,10 +2340,10 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64(<vscale x 7 x i64> %va, <vscale
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -2508,13 +2511,13 @@ define <vscale x 7 x i64> @vp_bitreverse_nxv7i64_unmasked(<vscale x 7 x i64> %va
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -2669,20 +2672,23 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: slli a5, a5, 4
@@ -2695,10 +2701,10 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64(<vscale x 8 x i64> %va, <vscale
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 16
; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -2866,13 +2872,13 @@ define <vscale x 8 x i64> @vp_bitreverse_nxv8i64_unmasked(<vscale x 8 x i64> %va
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -3056,13 +3062,13 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: .LBB46_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
@@ -3107,8 +3113,8 @@ define <vscale x 64 x i16> @vp_bitreverse_nxv64i16(<vscale x 64 x i16> %va, <vsc
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVBB-NEXT: vbrev.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 64 x i16> @llvm.vp.bitreverse.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
index 800d06c5a78f..aadd9852af11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/bswap-vp.ll
@@ -525,27 +525,27 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v10, v10, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11, v0.t
+; RV32-NEXT: vand.vx v11, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v11, v11, 24, v0.t
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
; RV32-NEXT: vsll.vi v12, v12, 8, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT: vor.vv v11, v11, v12, v0.t
+; RV32-NEXT: vor.vv v9, v9, v11, v0.t
+; RV32-NEXT: vsrl.vx v11, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a3, v0.t
; RV32-NEXT: vand.vx v12, v12, a2, v0.t
-; RV32-NEXT: vor.vv v10, v12, v10, v0.t
+; RV32-NEXT: vor.vv v11, v12, v11, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v11, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
-; RV32-NEXT: vor.vv v8, v8, v10, v0.t
+; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -609,27 +609,27 @@ define <vscale x 1 x i64> @vp_bswap_nxv1i64_unmasked(<vscale x 1 x i64> %va, i32
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3
; RV32-NEXT: vor.vv v9, v9, v10
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4
-; RV32-NEXT: vsll.vi v10, v10, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11
+; RV32-NEXT: vand.vx v11, v8, a4
+; RV32-NEXT: vsll.vi v11, v11, 24
+; RV32-NEXT: vand.vv v12, v8, v10
; RV32-NEXT: vsll.vi v12, v12, 8
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: vsrl.vx v10, v8, a1
+; RV32-NEXT: vor.vv v11, v11, v12
+; RV32-NEXT: vor.vv v9, v9, v11
+; RV32-NEXT: vsrl.vx v11, v8, a1
; RV32-NEXT: vsrl.vx v12, v8, a3
; RV32-NEXT: vand.vx v12, v12, a2
-; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vor.vv v11, v12, v11
; RV32-NEXT: vsrl.vi v12, v8, 24
; RV32-NEXT: vand.vx v12, v12, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v11
+; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: vor.vv v8, v8, v12
-; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vor.vv v8, v8, v11
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -695,27 +695,27 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3, v0.t
; RV32-NEXT: vor.vv v10, v10, v12, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v12, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14, v0.t
+; RV32-NEXT: vand.vx v14, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v14, v14, 24, v0.t
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
-; RV32-NEXT: vor.vv v12, v12, v16, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t
+; RV32-NEXT: vor.vv v14, v14, v16, v0.t
+; RV32-NEXT: vor.vv v10, v10, v14, v0.t
+; RV32-NEXT: vsrl.vx v14, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v12, v16, v12, v0.t
+; RV32-NEXT: vor.vv v14, v16, v14, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 24, v0.t
; RV32-NEXT: vand.vx v16, v16, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v14, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vor.vv v8, v8, v12, v0.t
+; RV32-NEXT: vor.vv v8, v8, v14, v0.t
; RV32-NEXT: vor.vv v8, v10, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -779,27 +779,27 @@ define <vscale x 2 x i64> @vp_bswap_nxv2i64_unmasked(<vscale x 2 x i64> %va, i32
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3
; RV32-NEXT: vor.vv v10, v10, v12
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m2, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4
-; RV32-NEXT: vsll.vi v12, v12, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14
+; RV32-NEXT: vand.vx v14, v8, a4
+; RV32-NEXT: vsll.vi v14, v14, 24
+; RV32-NEXT: vand.vv v16, v8, v12
; RV32-NEXT: vsll.vi v16, v16, 8
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vsrl.vx v12, v8, a1
+; RV32-NEXT: vor.vv v14, v14, v16
+; RV32-NEXT: vor.vv v10, v10, v14
+; RV32-NEXT: vsrl.vx v14, v8, a1
; RV32-NEXT: vsrl.vx v16, v8, a3
; RV32-NEXT: vand.vx v16, v16, a2
-; RV32-NEXT: vor.vv v12, v16, v12
+; RV32-NEXT: vor.vv v14, v16, v14
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v14
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -865,13 +865,13 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3, v0.t
; RV32-NEXT: vor.vv v16, v12, v16, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v12, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v20, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vx v20, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v20, v20, 24, v0.t
; RV32-NEXT: vand.vv v24, v8, v12, v0.t
; RV32-NEXT: vsll.vi v24, v24, 8, v0.t
; RV32-NEXT: vor.vv v20, v20, v24, v0.t
@@ -949,27 +949,27 @@ define <vscale x 4 x i64> @vp_bswap_nxv4i64_unmasked(<vscale x 4 x i64> %va, i32
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3
; RV32-NEXT: vor.vv v12, v12, v16
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m4, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v16, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v20, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v20
+; RV32-NEXT: vand.vx v20, v8, a4
+; RV32-NEXT: vsll.vi v20, v20, 24
+; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v16, v16, v24
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v20, v20, v24
+; RV32-NEXT: vor.vv v12, v12, v20
+; RV32-NEXT: vsrl.vx v20, v8, a1
; RV32-NEXT: vsrl.vx v24, v8, a3
; RV32-NEXT: vand.vx v24, v24, a2
-; RV32-NEXT: vor.vv v16, v24, v16
+; RV32-NEXT: vor.vv v20, v24, v20
; RV32-NEXT: vsrl.vi v24, v8, 24
; RV32-NEXT: vand.vx v24, v24, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v20
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v20
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -1045,20 +1045,23 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
@@ -1071,10 +1074,10 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64(<vscale x 7 x i64> %va, <vscale x 7
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -1185,13 +1188,13 @@ define <vscale x 7 x i64> @vp_bswap_nxv7i64_unmasked(<vscale x 7 x i64> %va, i32
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -1288,20 +1291,23 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
@@ -1314,10 +1320,10 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -1428,13 +1434,13 @@ define <vscale x 8 x i64> @vp_bswap_nxv8i64_unmasked(<vscale x 8 x i64> %va, i32
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -1539,13 +1545,13 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
@@ -1575,8 +1581,8 @@ define <vscale x 64 x i16> @vp_bswap_nxv64i16(<vscale x 64 x i16> %va, <vscale x
; CHECK-ZVKB-NEXT: # %bb.1:
; CHECK-ZVKB-NEXT: mv a0, a1
; CHECK-ZVKB-NEXT: .LBB32_2:
-; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVKB-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-ZVKB-NEXT: vrev8.v v8, v8, v0.t
; CHECK-ZVKB-NEXT: ret
%v = call <vscale x 64 x i16> @llvm.vp.bswap.nxv64i16(<vscale x 64 x i16> %va, <vscale x 64 x i1> %m, i32 %evl)
@@ -1646,27 +1652,27 @@ define <vscale x 1 x i48> @vp_bswap_nxv1i48(<vscale x 1 x i48> %va, <vscale x 1
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
+; RV32-NEXT: addi a4, sp, 8
+; RV32-NEXT: vsetvli a5, zero, e64, m1, ta, ma
+; RV32-NEXT: vlse64.v v10, (a4), zero
; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v10, v10, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
-; RV32-NEXT: vsetvli a6, zero, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11, v0.t
+; RV32-NEXT: vand.vx v11, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v11, v11, 24, v0.t
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
; RV32-NEXT: vsll.vi v12, v12, 8, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT: vor.vv v11, v11, v12, v0.t
+; RV32-NEXT: vor.vv v9, v9, v11, v0.t
+; RV32-NEXT: vsrl.vx v11, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a3, v0.t
; RV32-NEXT: vand.vx v12, v12, a2, v0.t
-; RV32-NEXT: vor.vv v10, v12, v10, v0.t
+; RV32-NEXT: vor.vv v11, v12, v11, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v11, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
-; RV32-NEXT: vor.vv v8, v8, v10, v0.t
+; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 16, v0.t
; RV32-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
index 0a7fa38b0c8a..2f0d5bb6e19c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/calling-conv-fastcc.ll
@@ -236,53 +236,40 @@ define fastcc <vscale x 32 x i32> @ret_nxv32i32_param_nxv32i32_nxv32i32_nxv32i32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 24
-; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv8r.v v24, v8
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a3, a2, a1
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vl8re32.v v8, (a1)
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vl8re32.v v0, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-NEXT: vl8re32.v v8, (a3)
+; CHECK-NEXT: vl8re32.v v24, (a0)
+; CHECK-NEXT: vl8re32.v v0, (a1)
+; CHECK-NEXT: vl8re32.v v16, (a3)
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re32.v v16, (a2)
-; CHECK-NEXT: vadd.vv v0, v24, v0
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli a0, zero, e32, m8, ta, ma
+; CHECK-NEXT: vadd.vv v24, v8, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vv v24, v24, v8
+; CHECK-NEXT: vadd.vv v0, v8, v0
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vv v8, v24, v8
-; CHECK-NEXT: vadd.vv v24, v0, v16
+; CHECK-NEXT: vadd.vv v8, v0, v8
+; CHECK-NEXT: vadd.vv v24, v24, v16
; CHECK-NEXT: vadd.vx v16, v8, a4
; CHECK-NEXT: vadd.vx v8, v24, a4
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
index aa11e012af20..dec67721514d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ceil-vp.ll
@@ -135,16 +135,16 @@ declare <vscale x 8 x half> @llvm.vp.ceil.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_ceil_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -179,16 +179,16 @@ declare <vscale x 16 x half> @llvm.vp.ceil.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_ceil_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -223,16 +223,16 @@ declare <vscale x 32 x half> @llvm.vp.ceil.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_ceil_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -359,8 +359,8 @@ define <vscale x 4 x float> @vp_ceil_vv_nxv4f32(<vscale x 4 x float> %va, <vscal
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -403,8 +403,8 @@ define <vscale x 8 x float> @vp_ceil_vv_nxv8f32(<vscale x 8 x float> %va, <vscal
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -447,8 +447,8 @@ define <vscale x 16 x float> @vp_ceil_vv_nxv16f32(<vscale x 16 x float> %va, <vs
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -525,16 +525,16 @@ declare <vscale x 2 x double> @llvm.vp.ceil.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_ceil_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -569,16 +569,16 @@ declare <vscale x 4 x double> @llvm.vp.ceil.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_ceil_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -613,16 +613,16 @@ declare <vscale x 7 x double> @llvm.vp.ceil.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_ceil_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -657,16 +657,16 @@ declare <vscale x 8 x double> @llvm.vp.ceil.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_ceil_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -705,66 +705,56 @@ define <vscale x 16 x double> @vp_ceil_vv_nxv16f64(<vscale x 16 x double> %va, <
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 3
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll b/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll
index ed434deea1a8..482cf83d540c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/combine-store-extract-crash.ll
@@ -10,19 +10,19 @@ define void @test(ptr %ref_array, ptr %sad_array) {
; RV32-NEXT: th.lwd a2, a3, (a0), 0, 3
; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT: vle8.v v8, (a2)
-; RV32-NEXT: vmv.v.i v9, 0
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vmv.s.x v8, zero
-; RV32-NEXT: vredsum.vs v10, v12, v8
-; RV32-NEXT: vmv.x.s a0, v10
+; RV32-NEXT: vredsum.vs v9, v12, v8
+; RV32-NEXT: vmv.x.s a0, v9
; RV32-NEXT: th.swia a0, (a1), 4, 0
; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV32-NEXT: vle8.v v10, (a3)
+; RV32-NEXT: vle8.v v9, (a3)
+; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: vslideup.vi v10, v9, 4
+; RV32-NEXT: vslideup.vi v9, v10, 4
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: vzext.vf4 v12, v10
+; RV32-NEXT: vzext.vf4 v12, v9
; RV32-NEXT: vredsum.vs v8, v12, v8
; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vse32.v v8, (a1)
@@ -33,19 +33,19 @@ define void @test(ptr %ref_array, ptr %sad_array) {
; RV64-NEXT: th.ldd a2, a3, (a0), 0, 4
; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64-NEXT: vle8.v v8, (a2)
-; RV64-NEXT: vmv.v.i v9, 0
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV64-NEXT: vzext.vf4 v12, v8
; RV64-NEXT: vmv.s.x v8, zero
-; RV64-NEXT: vredsum.vs v10, v12, v8
-; RV64-NEXT: vmv.x.s a0, v10
+; RV64-NEXT: vredsum.vs v9, v12, v8
+; RV64-NEXT: vmv.x.s a0, v9
; RV64-NEXT: th.swia a0, (a1), 4, 0
; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV64-NEXT: vle8.v v10, (a3)
+; RV64-NEXT: vle8.v v9, (a3)
+; RV64-NEXT: vmv.v.i v10, 0
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: vslideup.vi v10, v9, 4
+; RV64-NEXT: vslideup.vi v9, v10, 4
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT: vzext.vf4 v12, v10
+; RV64-NEXT: vzext.vf4 v12, v9
; RV64-NEXT: vredsum.vs v8, v12, v8
; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64-NEXT: vse32.v v8, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
index 673008d9c0b3..52811133c53f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/compressstore.ll
@@ -197,28 +197,51 @@ entry:
define void @test_compresstore_v256i8(ptr %p, <256 x i1> %mask, <256 x i8> %data) {
; RV64-LABEL: test_compresstore_v256i8:
; RV64: # %bb.0: # %entry
-; RV64-NEXT: vmv1r.v v7, v8
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 4
+; RV64-NEXT: sub sp, sp, a2
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; RV64-NEXT: li a2, 128
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64-NEXT: vle8.v v24, (a1)
+; RV64-NEXT: vle8.v v16, (a1)
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v0, 1
; RV64-NEXT: vmv.x.s a1, v9
; RV64-NEXT: vmv.x.s a3, v0
+; RV64-NEXT: csrr a4, vlenb
+; RV64-NEXT: slli a4, a4, 3
+; RV64-NEXT: add a4, sp, a4
+; RV64-NEXT: addi a4, a4, 16
+; RV64-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64-NEXT: vcompress.vm v8, v16, v0
+; RV64-NEXT: vcompress.vm v16, v24, v0
; RV64-NEXT: vcpop.m a4, v0
; RV64-NEXT: vsetvli zero, a4, e8, m8, ta, ma
-; RV64-NEXT: vse8.v v8, (a0)
+; RV64-NEXT: vse8.v v16, (a0)
+; RV64-NEXT: addi a4, sp, 16
+; RV64-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64-NEXT: vcompress.vm v8, v24, v7
-; RV64-NEXT: vcpop.m a2, v7
+; RV64-NEXT: vcompress.vm v16, v24, v8
+; RV64-NEXT: vcpop.m a2, v8
; RV64-NEXT: cpop a3, a3
; RV64-NEXT: cpop a1, a1
; RV64-NEXT: add a0, a0, a3
; RV64-NEXT: add a0, a0, a1
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64-NEXT: vse8.v v8, (a0)
+; RV64-NEXT: vse8.v v16, (a0)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 4
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
;
; RV32-LABEL: test_compresstore_v256i8:
@@ -796,18 +819,18 @@ define void @test_compresstore_v32i64(ptr %p, <32 x i1> %mask, <32 x i64> %data)
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vse64.v v24, (a0)
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vi v24, v0, 2
+; RV64-NEXT: vslidedown.vi v8, v0, 2
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vcompress.vm v8, v16, v24
+; RV64-NEXT: vcompress.vm v24, v16, v8
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV64-NEXT: vmv.x.s a1, v0
; RV64-NEXT: zext.h a1, a1
; RV64-NEXT: cpopw a1, a1
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a0, a0, a1
-; RV64-NEXT: vcpop.m a1, v24
+; RV64-NEXT: vcpop.m a1, v8
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: vse64.v v8, (a0)
+; RV64-NEXT: vse64.v v24, (a0)
; RV64-NEXT: ret
;
; RV32-LABEL: test_compresstore_v32i64:
@@ -818,18 +841,18 @@ define void @test_compresstore_v32i64(ptr %p, <32 x i1> %mask, <32 x i64> %data)
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vse64.v v24, (a0)
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v0, 2
+; RV32-NEXT: vslidedown.vi v8, v0, 2
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vcompress.vm v8, v16, v24
+; RV32-NEXT: vcompress.vm v24, v16, v8
; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV32-NEXT: vmv.x.s a1, v0
; RV32-NEXT: zext.h a1, a1
; RV32-NEXT: cpop a1, a1
; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: vcpop.m a1, v24
+; RV32-NEXT: vcpop.m a1, v8
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: vse64.v v24, (a0)
; RV32-NEXT: ret
entry:
tail call void @llvm.masked.compressstore.v32i64(<32 x i64> %data, ptr align 8 %p, <32 x i1> %mask)
diff --git a/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll b/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll
index bd65ed52be68..1343b64b876d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/concat-vector-insert-elt.ll
@@ -189,16 +189,16 @@ define void @v4xi64_concat_vector_insert_idx3(ptr %a, ptr %b, i64 %x) {
; RV32-LABEL: v4xi64_concat_vector_insert_idx3:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vle64.v v10, (a1)
+; RV32-NEXT: vle64.v v8, (a1)
+; RV32-NEXT: vle64.v v10, (a0)
; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v9, v8, a2
; RV32-NEXT: vslide1down.vx v9, v9, a3
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vslideup.vi v10, v9, 1
+; RV32-NEXT: vslideup.vi v8, v9, 1
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vslideup.vi v8, v10, 2
-; RV32-NEXT: vse64.v v8, (a0)
+; RV32-NEXT: vslideup.vi v10, v8, 2
+; RV32-NEXT: vse64.v v10, (a0)
; RV32-NEXT: ret
;
; RV64-LABEL: v4xi64_concat_vector_insert_idx3:
diff --git a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
index 113154c0f985..7839b602706d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/constant-folding-crash.ll
@@ -19,18 +19,19 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
; RV32-LABEL: constant_folding_crash:
; RV32: # %bb.0: # %entry
; RV32-NEXT: lw a0, 8(a0)
-; RV32-NEXT: vmv1r.v v10, v0
; RV32-NEXT: andi a0, a0, 1
; RV32-NEXT: seqz a0, a0
; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV32-NEXT: vmv.v.x v11, a0
-; RV32-NEXT: vmsne.vi v0, v11, 0
+; RV32-NEXT: vmv.v.x v10, a0
+; RV32-NEXT: vmsne.vi v10, v10, 0
+; RV32-NEXT: vmv1r.v v11, v0
+; RV32-NEXT: vmv1r.v v0, v10
; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vmerge.vvm v8, v9, v8, v0
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV32-NEXT: vmv.v.i v8, 0
-; RV32-NEXT: vmv1r.v v0, v10
+; RV32-NEXT: vmv1r.v v0, v11
; RV32-NEXT: vmerge.vim v8, v8, 1, v0
; RV32-NEXT: vrgather.vi v9, v8, 0
; RV32-NEXT: vmsne.vi v0, v9, 0
@@ -42,18 +43,19 @@ define void @constant_folding_crash(ptr %v54, <4 x ptr> %lanes.a, <4 x ptr> %lan
; RV64-LABEL: constant_folding_crash:
; RV64: # %bb.0: # %entry
; RV64-NEXT: ld a0, 8(a0)
-; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: andi a0, a0, 1
; RV64-NEXT: seqz a0, a0
; RV64-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; RV64-NEXT: vmv.v.x v13, a0
-; RV64-NEXT: vmsne.vi v0, v13, 0
+; RV64-NEXT: vmv.v.x v12, a0
+; RV64-NEXT: vmsne.vi v12, v12, 0
+; RV64-NEXT: vmv1r.v v13, v0
+; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vmerge.vvm v8, v10, v8, v0
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64-NEXT: vmv.v.i v8, 0
-; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: vmerge.vim v8, v8, 1, v0
; RV64-NEXT: vrgather.vi v9, v8, 0
; RV64-NEXT: vmsne.vi v0, v9, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
index 41ec102c34ef..6e538f3dfb38 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-sdnode.ll
@@ -3341,16 +3341,16 @@ define <vscale x 8 x i64> @ctlz_zero_undef_nxv8i64(<vscale x 8 x i64> %va) {
;
; RV32F-LABEL: ctlz_zero_undef_nxv8i64:
; RV32F: # %bb.0:
-; RV32F-NEXT: vmv8r.v v16, v8
; RV32F-NEXT: li a0, 190
; RV32F-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32F-NEXT: vmv.v.x v8, a0
+; RV32F-NEXT: vmv.v.x v16, a0
; RV32F-NEXT: fsrmi a0, 1
; RV32F-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; RV32F-NEXT: vfncvt.f.xu.w v24, v16
-; RV32F-NEXT: vsrl.vi v16, v24, 23
-; RV32F-NEXT: vwsubu.wv v8, v8, v16
+; RV32F-NEXT: vfncvt.f.xu.w v24, v8
+; RV32F-NEXT: vsrl.vi v8, v24, 23
+; RV32F-NEXT: vwsubu.wv v16, v16, v8
; RV32F-NEXT: fsrm a0
+; RV32F-NEXT: vmv8r.v v8, v16
; RV32F-NEXT: ret
;
; RV64F-LABEL: ctlz_zero_undef_nxv8i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
index 86086f5dc88f..fff280c005b5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
@@ -1259,8 +1259,8 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB46_2:
; CHECK-NEXT: fsrmi a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: vsrl.vx v8, v8, a2, v0.t
; CHECK-NEXT: vrsub.vx v8, v8, a3, v0.t
@@ -1285,8 +1285,8 @@ define <vscale x 16 x i64> @vp_ctlz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
@@ -2487,8 +2487,8 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB94_2:
; CHECK-NEXT: fsrmi a1, 1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: vsrl.vx v8, v8, a2, v0.t
; CHECK-NEXT: vrsub.vx v8, v8, a3, v0.t
@@ -2512,8 +2512,8 @@ define <vscale x 16 x i64> @vp_ctlz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB94_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vclz.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.ctlz.nxv16i64(<vscale x 16 x i64> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
index 883f68aec1f4..e3c53212e91b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -2024,8 +2024,7 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
; RV32-NEXT: vmv1r.v v24, v0
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 40
-; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -2044,30 +2043,35 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: addi a3, a3, -1
; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: lui a3, 349525
; RV32-NEXT: addi a3, a3, 1365
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a3
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
@@ -2078,64 +2082,64 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: lui a3, 209715
; RV32-NEXT: addi a3, a3, 819
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a3
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a3, 61681
; RV32-NEXT: addi a3, a3, -241
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a3
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a3
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
; RV32-NEXT: lui a3, 4112
; RV32-NEXT: addi a3, a3, 257
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v8, a3
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 16
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 4
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
+; RV32-NEXT: vmul.vv v16, v16, v8, v0.t
; RV32-NEXT: li a2, 56
-; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
+; RV32-NEXT: vsrl.vx v8, v16, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
@@ -2145,14 +2149,13 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 48
-; RV32-NEXT: mul a0, a0, a1
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: li a3, 48
+; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -2161,17 +2164,17 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
@@ -2183,41 +2186,41 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
@@ -2303,13 +2306,13 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT: vand.vx v16, v16, a2, v0.t
; RV64-NEXT: vsub.vv v16, v8, v16, v0.t
@@ -2347,8 +2350,8 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vcpop.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64> %va, <vscale x 16 x i1> %m, i32 %evl)
@@ -2375,13 +2378,13 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
; RV32-NEXT: addi a3, a3, 1365
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v0, a3
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v24, v24, v0
; RV32-NEXT: vsub.vv v24, v16, v24
; RV32-NEXT: lui a3, 209715
@@ -2404,20 +2407,20 @@ define <vscale x 16 x i64> @vp_ctpop_nxv16i64_unmasked(<vscale x 16 x i64> %va,
; RV32-NEXT: addi a3, a3, -241
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v24, a3
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v16, v16, v24
; RV32-NEXT: lui a3, 4112
; RV32-NEXT: addi a3, a3, 257
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v24, a3
+; RV32-NEXT: addi a3, sp, 16
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; RV32-NEXT: vmul.vv v16, v16, v24
; RV32-NEXT: li a2, 56
; RV32-NEXT: vsrl.vx v16, v16, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
index 4a001662ce2c..0ef0a431dabc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/cttz-vp.ll
@@ -2282,7 +2282,6 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: addi a4, a4, 1365
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v8, a4
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 24
; RV32-NEXT: mul a4, a4, a5
@@ -2295,6 +2294,7 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v16, v8, v0.t
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 40
@@ -2312,82 +2312,81 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV32-NEXT: lui a4, 209715
; RV32-NEXT: addi a4, a4, 819
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a4
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a4
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 40
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 40
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 48
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: lui a4, 61681
; RV32-NEXT: addi a4, a4, -241
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a4
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a4
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
; RV32-NEXT: lui a4, 4112
; RV32-NEXT: addi a4, a4, 257
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v8, a4
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
+; RV32-NEXT: vmul.vv v16, v16, v8, v0.t
; RV32-NEXT: li a3, 56
-; RV32-NEXT: vsrl.vx v8, v8, a3, v0.t
+; RV32-NEXT: vsrl.vx v8, v16, a3, v0.t
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: bltu a0, a1, .LBB46_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB46_2:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vx v8, v16, a2, v0.t
-; RV32-NEXT: vnot.v v16, v16, v0.t
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsub.vx v16, v8, a2, v0.t
+; RV32-NEXT: vnot.v v8, v8, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
@@ -2549,13 +2548,12 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB46_2:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsub.vx v16, v8, a2, v0.t
; RV64-NEXT: vnot.v v8, v8, v0.t
; RV64-NEXT: vand.vv v8, v8, v16, v0.t
@@ -2596,8 +2594,8 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64(<vscale x 16 x i64> %va, <vscale x
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB46_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vctz.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.cttz.nxv16i64(<vscale x 16 x i64> %va, i1 false, <vscale x 16 x i1> %m, i32 %evl)
@@ -2628,98 +2626,97 @@ define <vscale x 16 x i64> @vp_cttz_nxv16i64_unmasked(<vscale x 16 x i64> %va, i
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v8, v16, a2
; RV32-NEXT: vnot.v v16, v16
-; RV32-NEXT: vand.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: vand.vv v16, v16, v8
+; RV32-NEXT: vsrl.vi v24, v16, 1
; RV32-NEXT: lui a4, 349525
; RV32-NEXT: addi a4, a4, 1365
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v16, a4
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v8, a4
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: li a5, 24
; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v24, v24, v16
-; RV32-NEXT: vsub.vv v8, v8, v24
+; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vsub.vv v16, v16, v24
; RV32-NEXT: lui a4, 209715
; RV32-NEXT: addi a4, a4, 819
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v0, a4
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v24
+; RV32-NEXT: vand.vv v24, v16, v0
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v16, v16, v0
+; RV32-NEXT: vadd.vv v16, v24, v16
+; RV32-NEXT: vsrl.vi v24, v16, 4
+; RV32-NEXT: vadd.vv v16, v16, v24
; RV32-NEXT: lui a4, 61681
; RV32-NEXT: addi a4, a4, -241
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v16, a4
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vmv.v.x v8, a4
; RV32-NEXT: csrr a4, vlenb
; RV32-NEXT: slli a4, a4, 4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vand.vv v16, v8, v16
+; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v16, v8
; RV32-NEXT: lui a4, 4112
; RV32-NEXT: addi a4, a4, 257
; RV32-NEXT: vsetvli a5, zero, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.x v8, a4
+; RV32-NEXT: vmv.v.x v16, a4
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vmul.vv v16, v16, v8
+; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: li a3, 56
-; RV32-NEXT: vsrl.vx v8, v16, a3
+; RV32-NEXT: vsrl.vx v8, v8, a3
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: bltu a0, a1, .LBB47_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB47_2:
+; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vx v16, v24, a2
+; RV32-NEXT: vsub.vx v8, v24, a2
; RV32-NEXT: vnot.v v24, v24
-; RV32-NEXT: vand.vv v16, v24, v16
-; RV32-NEXT: vsrl.vi v24, v16, 1
+; RV32-NEXT: vand.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 1
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v24, v24, v8
-; RV32-NEXT: vsub.vv v16, v16, v24
-; RV32-NEXT: vand.vv v24, v16, v0
-; RV32-NEXT: vsrl.vi v16, v16, 2
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vadd.vv v16, v24, v16
-; RV32-NEXT: vsrl.vi v24, v16, 4
-; RV32-NEXT: vadd.vv v16, v16, v24
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vsub.vv v8, v8, v24
+; RV32-NEXT: vand.vv v24, v8, v0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v0
+; RV32-NEXT: vadd.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v24
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v16, v8
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vsrl.vx v8, v8, a3
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -4038,13 +4035,12 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB94_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: fsrmi a0, 1
@@ -4077,8 +4073,8 @@ define <vscale x 16 x i64> @vp_cttz_zero_undef_nxv16i64(<vscale x 16 x i64> %va,
; CHECK-ZVBB-NEXT: # %bb.1:
; CHECK-ZVBB-NEXT: mv a0, a1
; CHECK-ZVBB-NEXT: .LBB94_2:
-; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vmv1r.v v0, v24
+; CHECK-ZVBB-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-ZVBB-NEXT: vctz.v v8, v8, v0.t
; CHECK-ZVBB-NEXT: ret
%v = call <vscale x 16 x i64> @llvm.vp.cttz.nxv16i64(<vscale x 16 x i64> %va, i1 true, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
index e69b4789a09a..14719e190a69 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extractelt-i1.ll
@@ -78,7 +78,6 @@ define i1 @extractelt_nxv16i1(ptr %x, i64 %idx) nounwind {
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -96,7 +95,6 @@ define i1 @extractelt_nxv32i1(ptr %x, i64 %idx) nounwind {
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -114,7 +112,6 @@ define i1 @extractelt_nxv64i1(ptr %x, i64 %idx) nounwind {
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -142,22 +139,22 @@ define i1 @extractelt_nxv128i1(ptr %x, i64 %idx) nounwind {
; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: sub sp, sp, a3
; RV32-NEXT: andi sp, sp, -64
-; RV32-NEXT: addi a3, sp, 64
; RV32-NEXT: slli a2, a2, 3
-; RV32-NEXT: add a4, a0, a2
-; RV32-NEXT: vl8r.v v16, (a4)
+; RV32-NEXT: add a3, a0, a2
+; RV32-NEXT: vl8r.v v16, (a3)
; RV32-NEXT: vl8r.v v24, (a0)
-; RV32-NEXT: add a1, a3, a1
-; RV32-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV32-NEXT: addi a0, sp, 64
+; RV32-NEXT: add a1, a0, a1
+; RV32-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; RV32-NEXT: vmseq.vi v8, v16, 0
; RV32-NEXT: vmseq.vi v0, v24, 0
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vmerge.vim v24, v16, 1, v0
-; RV32-NEXT: vs8r.v v24, (a3)
-; RV32-NEXT: add a2, a3, a2
+; RV32-NEXT: vs8r.v v24, (a0)
+; RV32-NEXT: add a0, a0, a2
; RV32-NEXT: vmv1r.v v0, v8
; RV32-NEXT: vmerge.vim v8, v16, 1, v0
-; RV32-NEXT: vs8r.v v8, (a2)
+; RV32-NEXT: vs8r.v v8, (a0)
; RV32-NEXT: lbu a0, 0(a1)
; RV32-NEXT: addi sp, s0, -80
; RV32-NEXT: lw ra, 76(sp) # 4-byte Folded Reload
@@ -182,22 +179,22 @@ define i1 @extractelt_nxv128i1(ptr %x, i64 %idx) nounwind {
; RV64-NEXT: slli a3, a3, 4
; RV64-NEXT: sub sp, sp, a3
; RV64-NEXT: andi sp, sp, -64
-; RV64-NEXT: addi a3, sp, 64
; RV64-NEXT: slli a2, a2, 3
-; RV64-NEXT: add a4, a0, a2
-; RV64-NEXT: vl8r.v v16, (a4)
+; RV64-NEXT: add a3, a0, a2
+; RV64-NEXT: vl8r.v v16, (a3)
; RV64-NEXT: vl8r.v v24, (a0)
-; RV64-NEXT: add a1, a3, a1
-; RV64-NEXT: vsetvli a0, zero, e8, m8, ta, ma
+; RV64-NEXT: addi a0, sp, 64
+; RV64-NEXT: add a1, a0, a1
+; RV64-NEXT: vsetvli a3, zero, e8, m8, ta, ma
; RV64-NEXT: vmseq.vi v8, v16, 0
; RV64-NEXT: vmseq.vi v0, v24, 0
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: vmerge.vim v24, v16, 1, v0
-; RV64-NEXT: vs8r.v v24, (a3)
-; RV64-NEXT: add a2, a3, a2
+; RV64-NEXT: vs8r.v v24, (a0)
+; RV64-NEXT: add a0, a0, a2
; RV64-NEXT: vmv1r.v v0, v8
; RV64-NEXT: vmerge.vim v8, v16, 1, v0
-; RV64-NEXT: vs8r.v v8, (a2)
+; RV64-NEXT: vs8r.v v8, (a0)
; RV64-NEXT: lbu a0, 0(a1)
; RV64-NEXT: addi sp, s0, -80
; RV64-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
index 875f4f239028..6b8d778bc324 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-i8-index-cornercase.ll
@@ -35,10 +35,10 @@ define <512 x i8> @single_source(<512 x i8> %a) {
; CHECK-NEXT: vslidedown.vi v16, v16, 4
; CHECK-NEXT: li a0, 466
; CHECK-NEXT: li a1, 465
+; CHECK-NEXT: lbu a2, 1012(sp)
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
-; CHECK-NEXT: lbu a0, 1012(sp)
; CHECK-NEXT: vslideup.vx v8, v16, a1
-; CHECK-NEXT: vmv.s.x v16, a0
+; CHECK-NEXT: vmv.s.x v16, a2
; CHECK-NEXT: li a0, 501
; CHECK-NEXT: li a1, 500
; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, ma
@@ -118,16 +118,16 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
; CHECK-NEXT: vslidedown.vi v24, v24, 4
; CHECK-NEXT: li a1, 466
; CHECK-NEXT: li a2, 465
+; CHECK-NEXT: lbu a3, 985(sp)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
-; CHECK-NEXT: lbu a1, 985(sp)
; CHECK-NEXT: vslideup.vx v8, v24, a2
-; CHECK-NEXT: vmv.s.x v24, a1
+; CHECK-NEXT: vmv.s.x v24, a3
; CHECK-NEXT: li a1, 478
; CHECK-NEXT: li a2, 477
+; CHECK-NEXT: lbu a3, 1012(sp)
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
-; CHECK-NEXT: lbu a1, 1012(sp)
; CHECK-NEXT: vslideup.vx v8, v24, a2
-; CHECK-NEXT: vmv.s.x v24, a1
+; CHECK-NEXT: vmv.s.x v24, a3
; CHECK-NEXT: li a1, 501
; CHECK-NEXT: li a2, 500
; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
@@ -137,21 +137,21 @@ define <512 x i8> @two_source(<512 x i8> %a, <512 x i8> %b) {
; CHECK-NEXT: addi a1, a1, 501
; CHECK-NEXT: slli a1, a1, 13
; CHECK-NEXT: addi a1, a1, 512
+; CHECK-NEXT: vsetivli zero, 8, e64, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v24, 0
+; CHECK-NEXT: lui a2, 1047552
+; CHECK-NEXT: addiw a2, a2, 1
+; CHECK-NEXT: slli a2, a2, 23
+; CHECK-NEXT: addi a2, a2, 1
+; CHECK-NEXT: slli a2, a2, 18
+; CHECK-NEXT: vslide1down.vx v0, v24, a2
+; CHECK-NEXT: lui a2, 4
+; CHECK-NEXT: vmv.s.x v24, a2
; CHECK-NEXT: li a2, 64
+; CHECK-NEXT: vsetivli zero, 7, e64, m1, tu, ma
+; CHECK-NEXT: vslideup.vi v0, v24, 6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv.v.x v24, a1
-; CHECK-NEXT: vsetivli zero, 8, e64, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v7, 0
-; CHECK-NEXT: lui a1, 1047552
-; CHECK-NEXT: addiw a1, a1, 1
-; CHECK-NEXT: slli a1, a1, 23
-; CHECK-NEXT: addi a1, a1, 1
-; CHECK-NEXT: slli a1, a1, 18
-; CHECK-NEXT: vslide1down.vx v0, v7, a1
-; CHECK-NEXT: lui a1, 4
-; CHECK-NEXT: vmv.s.x v7, a1
-; CHECK-NEXT: vsetivli zero, 7, e64, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v0, v7, 6
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT: vrgather.vv v8, v16, v24, v0.t
; CHECK-NEXT: addi sp, s0, -1536
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
index c0d366760d07..f3e823562888 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll
@@ -417,8 +417,8 @@ declare <32 x i64> @llvm.vp.abs.v32i64(<32 x i64>, i1 immarg, <32 x i1>, i32)
define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_abs_v32i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB34_2
@@ -432,8 +432,8 @@ define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl)
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrsub.vi v24, v16, 0, v0.t
; CHECK-NEXT: vmax.vv v16, v16, v24, v0.t
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
index 943fc58d637a..068c25b82100 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bitreverse-vp.ll
@@ -847,27 +847,27 @@ define <2 x i64> @vp_bitreverse_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %e
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v10, v10, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
+; RV32-NEXT: vlse64.v v10, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11, v0.t
+; RV32-NEXT: vand.vx v11, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v11, v11, 24, v0.t
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
; RV32-NEXT: vsll.vi v12, v12, 8, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT: vor.vv v11, v11, v12, v0.t
+; RV32-NEXT: vor.vv v9, v9, v11, v0.t
+; RV32-NEXT: vsrl.vx v11, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a3, v0.t
; RV32-NEXT: vand.vx v12, v12, a2, v0.t
-; RV32-NEXT: vor.vv v10, v12, v10, v0.t
+; RV32-NEXT: vor.vv v11, v12, v11, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v11, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
-; RV32-NEXT: vor.vv v8, v8, v10, v0.t
+; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
; RV32-NEXT: lui a1, 61681
@@ -982,27 +982,27 @@ define <2 x i64> @vp_bitreverse_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl)
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3
; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4
-; RV32-NEXT: vsll.vi v10, v10, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
+; RV32-NEXT: vlse64.v v10, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11
+; RV32-NEXT: vand.vx v11, v8, a4
+; RV32-NEXT: vsll.vi v11, v11, 24
+; RV32-NEXT: vand.vv v12, v8, v10
; RV32-NEXT: vsll.vi v12, v12, 8
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: vsrl.vx v10, v8, a1
+; RV32-NEXT: vor.vv v11, v11, v12
+; RV32-NEXT: vor.vv v9, v9, v11
+; RV32-NEXT: vsrl.vx v11, v8, a1
; RV32-NEXT: vsrl.vx v12, v8, a3
; RV32-NEXT: vand.vx v12, v12, a2
-; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vor.vv v11, v12, v11
; RV32-NEXT: vsrl.vi v12, v8, 24
; RV32-NEXT: vand.vx v12, v12, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v11
+; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: vor.vv v8, v8, v12
-; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vor.vv v8, v8, v11
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: vsrl.vi v9, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -1119,27 +1119,27 @@ define <4 x i64> @vp_bitreverse_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %e
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3, v0.t
; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v12, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14, v0.t
+; RV32-NEXT: vand.vx v14, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v14, v14, 24, v0.t
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
-; RV32-NEXT: vor.vv v12, v12, v16, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t
+; RV32-NEXT: vor.vv v14, v14, v16, v0.t
+; RV32-NEXT: vor.vv v10, v10, v14, v0.t
+; RV32-NEXT: vsrl.vx v14, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v12, v16, v12, v0.t
+; RV32-NEXT: vor.vv v14, v16, v14, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 24, v0.t
; RV32-NEXT: vand.vx v16, v16, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v14, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vor.vv v8, v8, v12, v0.t
+; RV32-NEXT: vor.vv v8, v8, v14, v0.t
; RV32-NEXT: vor.vv v8, v10, v8, v0.t
; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
; RV32-NEXT: lui a1, 61681
@@ -1254,27 +1254,27 @@ define <4 x i64> @vp_bitreverse_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl)
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3
; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4
-; RV32-NEXT: vsll.vi v12, v12, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14
+; RV32-NEXT: vand.vx v14, v8, a4
+; RV32-NEXT: vsll.vi v14, v14, 24
+; RV32-NEXT: vand.vv v16, v8, v12
; RV32-NEXT: vsll.vi v16, v16, 8
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vsrl.vx v12, v8, a1
+; RV32-NEXT: vor.vv v14, v14, v16
+; RV32-NEXT: vor.vv v10, v10, v14
+; RV32-NEXT: vsrl.vx v14, v8, a1
; RV32-NEXT: vsrl.vx v16, v8, a3
; RV32-NEXT: vand.vx v16, v16, a2
-; RV32-NEXT: vor.vv v12, v16, v12
+; RV32-NEXT: vor.vv v14, v16, v14
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v14
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: vsrl.vi v10, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -1391,13 +1391,13 @@ define <8 x i64> @vp_bitreverse_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %e
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3, v0.t
; RV32-NEXT: vor.vv v16, v12, v16, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v20, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vx v20, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v20, v20, 24, v0.t
; RV32-NEXT: vand.vv v24, v8, v12, v0.t
; RV32-NEXT: vsll.vi v24, v24, 8, v0.t
; RV32-NEXT: vor.vv v20, v20, v24, v0.t
@@ -1526,27 +1526,27 @@ define <8 x i64> @vp_bitreverse_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl)
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3
; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v16, v16, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v20, (a5), zero
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v20
+; RV32-NEXT: vand.vx v20, v8, a4
+; RV32-NEXT: vsll.vi v20, v20, 24
+; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v16, v16, v24
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v20, v20, v24
+; RV32-NEXT: vor.vv v12, v12, v20
+; RV32-NEXT: vsrl.vx v20, v8, a1
; RV32-NEXT: vsrl.vx v24, v8, a3
; RV32-NEXT: vand.vx v24, v24, a2
-; RV32-NEXT: vor.vv v16, v24, v16
+; RV32-NEXT: vor.vv v20, v24, v20
; RV32-NEXT: vsrl.vi v24, v8, 24
; RV32-NEXT: vand.vx v24, v24, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v20
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v20
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: vsrl.vi v12, v8, 4
; RV32-NEXT: lui a1, 61681
@@ -1685,20 +1685,23 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 48
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 48
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 48
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a5, sp, 48
+; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: slli a5, a5, 4
@@ -1711,10 +1714,10 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 48
; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a1, sp, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -1727,38 +1730,38 @@ define <15 x i64> @vp_bitreverse_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroex
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vor.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: addi a1, sp, 48
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: addi a1, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vor.vv v16, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 4, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsll.vi v8, v8, 4, v0.t
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsll.vi v16, v16, 4, v0.t
+; RV32-NEXT: vor.vv v16, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsll.vi v8, v8, 2, v0.t
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsll.vi v16, v16, 2, v0.t
+; RV32-NEXT: vor.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
; RV32-NEXT: vsll.vi v8, v8, 1, v0.t
@@ -1885,60 +1888,60 @@ define <15 x i64> @vp_bitreverse_v15i64_unmasked(<15 x i64> %va, i32 zeroext %ev
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 48
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: vlse64.v v24, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
-; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v24, v0, v24
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
+; RV32-NEXT: vand.vv v16, v8, v24
+; RV32-NEXT: vsll.vi v16, v16, 8
+; RV32-NEXT: vor.vv v16, v0, v16
; RV32-NEXT: addi a5, sp, 48
; RV32-NEXT: vl8r.v v0, (a5) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vor.vv v16, v0, v16
+; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vx v0, v8, a3
; RV32-NEXT: vand.vx v0, v0, a2
-; RV32-NEXT: vsrl.vx v24, v8, a1
-; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: vsrl.vi v0, v8, 8
-; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v0, v0, v16
+; RV32-NEXT: vsrl.vi v16, v8, 8
+; RV32-NEXT: vand.vv v16, v16, v24
; RV32-NEXT: vsrl.vi v8, v8, 24
; RV32-NEXT: vand.vx v8, v8, a4
; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: addi a1, sp, 48
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
-; RV32-NEXT: vsll.vi v8, v8, 4
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 2
+; RV32-NEXT: vor.vv v8, v8, v0
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 4
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
-; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: vsll.vi v8, v8, 4
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 2
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vsll.vi v8, v8, 2
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vor.vv v8, v16, v8
+; RV32-NEXT: vor.vv v8, v24, v8
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
@@ -2049,20 +2052,23 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 48
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 48
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 48
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a5, sp, 48
+; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a5, vlenb
; RV32-NEXT: slli a5, a5, 4
@@ -2075,10 +2081,10 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV32-NEXT: add a5, sp, a5
; RV32-NEXT: addi a5, a5, 48
; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a1, sp, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -2091,38 +2097,38 @@ define <16 x i64> @vp_bitreverse_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroex
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vor.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: addi a1, sp, 48
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: addi a1, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vor.vv v16, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 4, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsll.vi v8, v8, 4, v0.t
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsll.vi v16, v16, 4, v0.t
+; RV32-NEXT: vor.vv v16, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsll.vi v8, v8, 2, v0.t
-; RV32-NEXT: vor.vv v8, v16, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsll.vi v16, v16, 2, v0.t
+; RV32-NEXT: vor.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vand.vv v16, v16, v24, v0.t
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
; RV32-NEXT: vsll.vi v8, v8, 1, v0.t
@@ -2249,60 +2255,60 @@ define <16 x i64> @vp_bitreverse_v16i64_unmasked(<16 x i64> %va, i32 zeroext %ev
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 48
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 16
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: vlse64.v v24, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
-; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v24, v0, v24
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
+; RV32-NEXT: vand.vv v16, v8, v24
+; RV32-NEXT: vsll.vi v16, v16, 8
+; RV32-NEXT: vor.vv v16, v0, v16
; RV32-NEXT: addi a5, sp, 48
; RV32-NEXT: vl8r.v v0, (a5) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: vs8r.v v24, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vor.vv v16, v0, v16
+; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vx v0, v8, a3
; RV32-NEXT: vand.vx v0, v0, a2
-; RV32-NEXT: vsrl.vx v24, v8, a1
-; RV32-NEXT: vor.vv v24, v0, v24
-; RV32-NEXT: vsrl.vi v0, v8, 8
-; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v0, v0, v16
+; RV32-NEXT: vsrl.vi v16, v8, 8
+; RV32-NEXT: vand.vv v16, v16, v24
; RV32-NEXT: vsrl.vi v8, v8, 24
; RV32-NEXT: vand.vx v8, v8, a4
; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: addi a1, sp, 48
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
-; RV32-NEXT: vsll.vi v8, v8, 4
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 2
+; RV32-NEXT: vor.vv v8, v8, v0
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 4
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
-; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vor.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: vsll.vi v8, v8, 4
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 2
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
+; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vsll.vi v8, v8, 2
+; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vor.vv v8, v16, v8
+; RV32-NEXT: vor.vv v8, v24, v8
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add sp, sp, a0
@@ -2388,8 +2394,8 @@ define <128 x i16> @vp_bitreverse_v128i16(<128 x i16> %va, <128 x i1> %m, i32 ze
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: li a2, 64
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 8
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB34_2
@@ -2427,13 +2433,13 @@ define <128 x i16> @vp_bitreverse_v128i16(<128 x i16> %va, <128 x i1> %m, i32 ze
; CHECK-NEXT: sltu a0, a0, a4
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a4
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 3
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
index f80d4e5c0d7c..149073868732 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-bswap-vp.ll
@@ -295,27 +295,27 @@ define <2 x i64> @vp_bswap_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3, v0.t
; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v10, v10, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
+; RV32-NEXT: vlse64.v v10, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11, v0.t
+; RV32-NEXT: vand.vx v11, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v11, v11, 24, v0.t
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
; RV32-NEXT: vsll.vi v12, v12, 8, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vor.vv v9, v9, v10, v0.t
-; RV32-NEXT: vsrl.vx v10, v8, a1, v0.t
+; RV32-NEXT: vor.vv v11, v11, v12, v0.t
+; RV32-NEXT: vor.vv v9, v9, v11, v0.t
+; RV32-NEXT: vsrl.vx v11, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v12, v8, a3, v0.t
; RV32-NEXT: vand.vx v12, v12, a2, v0.t
-; RV32-NEXT: vor.vv v10, v12, v10, v0.t
+; RV32-NEXT: vor.vv v11, v12, v11, v0.t
; RV32-NEXT: vsrl.vi v12, v8, 24, v0.t
; RV32-NEXT: vand.vx v12, v12, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v11, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
-; RV32-NEXT: vor.vv v8, v8, v10, v0.t
+; RV32-NEXT: vor.vv v8, v8, v11, v0.t
; RV32-NEXT: vor.vv v8, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -373,27 +373,27 @@ define <2 x i64> @vp_bswap_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v10, v10, a3
; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v10, v8, a4
-; RV32-NEXT: vsll.vi v10, v10, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vlse64.v v11, (a5), zero
+; RV32-NEXT: vlse64.v v10, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
-; RV32-NEXT: vand.vv v12, v8, v11
+; RV32-NEXT: vand.vx v11, v8, a4
+; RV32-NEXT: vsll.vi v11, v11, 24
+; RV32-NEXT: vand.vv v12, v8, v10
; RV32-NEXT: vsll.vi v12, v12, 8
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vor.vv v9, v9, v10
-; RV32-NEXT: vsrl.vx v10, v8, a1
+; RV32-NEXT: vor.vv v11, v11, v12
+; RV32-NEXT: vor.vv v9, v9, v11
+; RV32-NEXT: vsrl.vx v11, v8, a1
; RV32-NEXT: vsrl.vx v12, v8, a3
; RV32-NEXT: vand.vx v12, v12, a2
-; RV32-NEXT: vor.vv v10, v12, v10
+; RV32-NEXT: vor.vv v11, v12, v11
; RV32-NEXT: vsrl.vi v12, v8, 24
; RV32-NEXT: vand.vx v12, v12, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v11
+; RV32-NEXT: vand.vv v8, v8, v10
; RV32-NEXT: vor.vv v8, v8, v12
-; RV32-NEXT: vor.vv v8, v8, v10
+; RV32-NEXT: vor.vv v8, v8, v11
; RV32-NEXT: vor.vv v8, v9, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -453,27 +453,27 @@ define <4 x i64> @vp_bswap_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3, v0.t
; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v12, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14, v0.t
+; RV32-NEXT: vand.vx v14, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v14, v14, 24, v0.t
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
-; RV32-NEXT: vor.vv v12, v12, v16, v0.t
-; RV32-NEXT: vor.vv v10, v10, v12, v0.t
-; RV32-NEXT: vsrl.vx v12, v8, a1, v0.t
+; RV32-NEXT: vor.vv v14, v14, v16, v0.t
+; RV32-NEXT: vor.vv v10, v10, v14, v0.t
+; RV32-NEXT: vsrl.vx v14, v8, a1, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v12, v16, v12, v0.t
+; RV32-NEXT: vor.vv v14, v16, v14, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 24, v0.t
; RV32-NEXT: vand.vx v16, v16, a4, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v14, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vor.vv v8, v8, v12, v0.t
+; RV32-NEXT: vor.vv v8, v8, v14, v0.t
; RV32-NEXT: vor.vv v8, v10, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -531,27 +531,27 @@ define <4 x i64> @vp_bswap_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v12, v12, a3
; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4
-; RV32-NEXT: vsll.vi v12, v12, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vlse64.v v14, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v14
+; RV32-NEXT: vand.vx v14, v8, a4
+; RV32-NEXT: vsll.vi v14, v14, 24
+; RV32-NEXT: vand.vv v16, v8, v12
; RV32-NEXT: vsll.vi v16, v16, 8
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vor.vv v10, v10, v12
-; RV32-NEXT: vsrl.vx v12, v8, a1
+; RV32-NEXT: vor.vv v14, v14, v16
+; RV32-NEXT: vor.vv v10, v10, v14
+; RV32-NEXT: vsrl.vx v14, v8, a1
; RV32-NEXT: vsrl.vx v16, v8, a3
; RV32-NEXT: vand.vx v16, v16, a2
-; RV32-NEXT: vor.vv v12, v16, v12
+; RV32-NEXT: vor.vv v14, v16, v14
; RV32-NEXT: vsrl.vi v16, v8, 24
; RV32-NEXT: vand.vx v16, v16, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v14
+; RV32-NEXT: vand.vv v8, v8, v12
; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vor.vv v8, v8, v12
+; RV32-NEXT: vor.vv v8, v8, v14
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -611,13 +611,13 @@ define <8 x i64> @vp_bswap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3, v0.t
; RV32-NEXT: vor.vv v16, v12, v16, v0.t
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v12, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v20, v12, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v12, (a5), zero
+; RV32-NEXT: vlse64.v v12, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vx v20, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v20, v20, 24, v0.t
; RV32-NEXT: vand.vv v24, v8, v12, v0.t
; RV32-NEXT: vsll.vi v24, v24, 8, v0.t
; RV32-NEXT: vor.vv v20, v20, v24, v0.t
@@ -689,27 +689,27 @@ define <8 x i64> @vp_bswap_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: li a3, 40
; RV32-NEXT: vsll.vx v16, v16, a3
; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v16, v16, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV32-NEXT: vlse64.v v20, (a5), zero
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v20
+; RV32-NEXT: vand.vx v20, v8, a4
+; RV32-NEXT: vsll.vi v20, v20, 24
+; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
-; RV32-NEXT: vor.vv v16, v16, v24
-; RV32-NEXT: vor.vv v12, v12, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
+; RV32-NEXT: vor.vv v20, v20, v24
+; RV32-NEXT: vor.vv v12, v12, v20
+; RV32-NEXT: vsrl.vx v20, v8, a1
; RV32-NEXT: vsrl.vx v24, v8, a3
; RV32-NEXT: vand.vx v24, v24, a2
-; RV32-NEXT: vor.vv v16, v24, v16
+; RV32-NEXT: vor.vv v20, v24, v20
; RV32-NEXT: vsrl.vi v24, v8, 24
; RV32-NEXT: vand.vx v24, v24, a4
; RV32-NEXT: vsrl.vi v8, v8, 8
-; RV32-NEXT: vand.vv v8, v8, v20
+; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vor.vv v8, v8, v16
+; RV32-NEXT: vor.vv v8, v8, v20
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
@@ -779,20 +779,23 @@ define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %ev
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
@@ -805,10 +808,10 @@ define <15 x i64> @vp_bswap_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %ev
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -913,13 +916,13 @@ define <15 x i64> @vp_bswap_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -1010,20 +1013,23 @@ define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4, v0.t
-; RV32-NEXT: vsll.vi v24, v16, 24, v0.t
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
-; RV32-NEXT: csrr a5, vlenb
-; RV32-NEXT: slli a5, a5, 3
-; RV32-NEXT: add a5, sp, a5
-; RV32-NEXT: addi a5, a5, 16
-; RV32-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: slli a4, a4, 3
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsll.vi v16, v16, 8, v0.t
+; RV32-NEXT: vand.vx v24, v8, a4, v0.t
+; RV32-NEXT: vsll.vi v24, v24, 24, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsll.vi v16, v24, 8, v0.t
+; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
@@ -1036,10 +1042,10 @@ define <16 x i64> @vp_bswap_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %ev
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vx v24, v8, a1, v0.t
-; RV32-NEXT: vsrl.vx v16, v8, a3, v0.t
-; RV32-NEXT: vand.vx v16, v16, a2, v0.t
-; RV32-NEXT: vor.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT: vsrl.vx v24, v8, a3, v0.t
+; RV32-NEXT: vand.vx v24, v24, a2, v0.t
+; RV32-NEXT: vor.vv v16, v24, v16, v0.t
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v24, v8, 24, v0.t
@@ -1144,13 +1150,13 @@ define <16 x i64> @vp_bswap_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vor.vv v16, v16, v24
; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: lui a4, 4080
-; RV32-NEXT: vand.vx v16, v8, a4
-; RV32-NEXT: vsll.vi v0, v16, 24
-; RV32-NEXT: addi a5, sp, 8
+; RV32-NEXT: addi a4, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a5), zero
+; RV32-NEXT: vlse64.v v16, (a4), zero
+; RV32-NEXT: lui a4, 4080
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vx v0, v8, a4
+; RV32-NEXT: vsll.vi v0, v0, 24
; RV32-NEXT: vand.vv v24, v8, v16
; RV32-NEXT: vsll.vi v24, v24, 8
; RV32-NEXT: vor.vv v24, v0, v24
@@ -1228,8 +1234,8 @@ define <128 x i16> @vp_bswap_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: li a2, 64
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 8
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -1246,13 +1252,13 @@ define <128 x i16> @vp_bswap_v128i16(<128 x i16> %va, <128 x i1> %m, i32 zeroext
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vi v16, v8, 8, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 8, v0.t
; CHECK-NEXT: vor.vv v16, v8, v16, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
index af7d7f7ae755..65a1035fd815 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-buildvec-of-binop.ll
@@ -567,13 +567,14 @@ define <8 x i32> @add_constant_rhs_8xi32_partial(<8 x i32> %vin, i32 %a, i32 %b,
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vmv.s.x v10, a2
+; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vle32.v v12, (a0)
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vmv.s.x v10, a3
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: lui a0, %hi(.LCPI19_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI19_0)
-; CHECK-NEXT: vle32.v v12, (a0)
; CHECK-NEXT: vslideup.vi v8, v10, 7
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
index 3e2af7e8267b..befbfb88550b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ceil-vp.ll
@@ -204,8 +204,8 @@ define <8 x half> @vp_ceil_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 3
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <16 x half> @llvm.vp.ceil.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_ceil_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 3
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <16 x half> @vp_ceil_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %e
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 3
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -439,8 +439,8 @@ define <8 x float> @vp_ceil_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -483,8 +483,8 @@ define <16 x float> @vp_ceil_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -561,16 +561,16 @@ declare <4 x double> @llvm.vp.ceil.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_ceil_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -605,16 +605,16 @@ declare <8 x double> @llvm.vp.ceil.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_ceil_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -649,16 +649,16 @@ declare <15 x double> @llvm.vp.ceil.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_ceil_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -693,16 +693,16 @@ declare <16 x double> @llvm.vp.ceil.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_ceil_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -737,69 +737,59 @@ declare <32 x double> @llvm.vp.ceil.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_ceil_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ceil_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a1, 3
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 3
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
index 2f4539d5038c..b42fb8c68616 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctlz-vp.ll
@@ -1503,24 +1503,28 @@ declare <15 x i64> @llvm.vp.ctlz.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32)
define <15 x i64> @vp_ctlz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctlz_v15i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
@@ -1535,37 +1539,60 @@ define <15 x i64> @vp_ctlz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vnot.v v24, v8, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctlz_v15i64:
@@ -1655,33 +1682,29 @@ define <15 x i64> @vp_ctlz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vsrl.vx v16, v8, a1
; RV32-NEXT: vor.vv v8, v8, v16
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1743,24 +1766,28 @@ declare <16 x i64> @llvm.vp.ctlz.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32)
define <16 x i64> @vp_ctlz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctlz_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
@@ -1775,37 +1802,60 @@ define <16 x i64> @vp_ctlz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vnot.v v24, v8, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctlz_v16i64:
@@ -1895,33 +1945,29 @@ define <16 x i64> @vp_ctlz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vsrl.vx v16, v8, a1
; RV32-NEXT: vor.vv v8, v8, v16
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1991,7 +2037,7 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 56 * vlenb
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -2035,111 +2081,145 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
+; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
+; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, sp, 24
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a3, a3, a5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v8, (a4), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
; RV32-NEXT: li a2, 56
; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
@@ -2147,13 +2227,13 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: vor.vv v8, v16, v8, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
@@ -2171,18 +2251,18 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -2191,17 +2271,35 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
@@ -2211,7 +2309,7 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -2219,21 +2317,21 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
+; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -2257,8 +2355,8 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB34_2
@@ -2315,13 +2413,13 @@ define <32 x i64> @vp_ctlz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: sltu a0, a0, a7
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a7
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a7, vlenb
+; RV64-NEXT: slli a7, a7, 3
+; RV64-NEXT: add a7, sp, a7
+; RV64-NEXT: addi a7, a7, 16
+; RV64-NEXT: vl8r.v v8, (a7) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT: vor.vv v16, v8, v16, v0.t
; RV64-NEXT: vsrl.vi v8, v16, 2, v0.t
@@ -2364,10 +2462,14 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 32 * vlenb
-; RV32-NEXT: vmv8r.v v24, v16
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
; RV32-NEXT: sw a1, 44(sp)
@@ -2391,74 +2493,8 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB35_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 2
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 8
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 16
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v8, a2
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: addi a3, sp, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: li a1, 56
-; RV32-NEXT: vsrl.vx v8, v8, a1
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, a0, -16
-; RV32-NEXT: sltu a0, a0, a3
-; RV32-NEXT: addi a0, a0, -1
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v8, v24, 1
-; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 2
; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 4
@@ -2467,41 +2503,84 @@ define <32 x i64> @vp_ctlz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 16
; RV32-NEXT: vor.vv v8, v8, v24
+; RV32-NEXT: li a2, 32
; RV32-NEXT: vsrl.vx v24, v8, a2
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v24, v8, 1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 24
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vnot.v v0, v8
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v24, v0, 1
; RV32-NEXT: vand.vv v24, v24, v16
-; RV32-NEXT: vsub.vv v8, v8, v24
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
+; RV32-NEXT: vsub.vv v24, v0, v24
+; RV32-NEXT: vand.vv v0, v24, v8
+; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vadd.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vadd.vv v24, v24, v0
+; RV32-NEXT: addi a3, sp, 48
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, a0, -16
+; RV32-NEXT: sltu a0, a0, a3
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v0, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v24, v0, 1
+; RV32-NEXT: vor.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 2
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 8
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 16
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vx v0, v24, a2
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vnot.v v24, v24
+; RV32-NEXT: vsrl.vi v0, v24, 1
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: addi a2, sp, 24
+; RV32-NEXT: vsub.vv v16, v24, v16
+; RV32-NEXT: vand.vv v24, v16, v8
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v8, v16, v8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a2), zero
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a2), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v0, v0, v16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a0, sp, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v16, v0, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v8, v24
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v8, v16, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v16, v24, a2
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
@@ -4060,24 +4139,28 @@ define <8 x i64> @vp_ctlz_zero_undef_v8i64_unmasked(<8 x i64> %va, i32 zeroext %
define <15 x i64> @vp_ctlz_zero_undef_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctlz_zero_undef_v15i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
@@ -4092,37 +4175,60 @@ define <15 x i64> @vp_ctlz_zero_undef_v15i64(<15 x i64> %va, <15 x i1> %m, i32 z
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vnot.v v24, v8, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctlz_zero_undef_v15i64:
@@ -4212,33 +4318,29 @@ define <15 x i64> @vp_ctlz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
; RV32-NEXT: vsrl.vx v16, v8, a1
; RV32-NEXT: vor.vv v8, v8, v16
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -4298,24 +4400,28 @@ define <15 x i64> @vp_ctlz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
define <16 x i64> @vp_ctlz_zero_undef_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctlz_zero_undef_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
@@ -4330,37 +4436,60 @@ define <16 x i64> @vp_ctlz_zero_undef_v16i64(<16 x i64> %va, <16 x i1> %m, i32 z
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
-; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vnot.v v24, v8, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctlz_zero_undef_v16i64:
@@ -4450,33 +4579,29 @@ define <16 x i64> @vp_ctlz_zero_undef_v16i64_unmasked(<16 x i64> %va, i32 zeroex
; RV32-NEXT: vsrl.vx v16, v8, a1
; RV32-NEXT: vor.vv v8, v8, v16
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -4544,7 +4669,7 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 56 * vlenb
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -4588,111 +4713,145 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
+; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
+; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, sp, 24
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a3, a3, a5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v8, (a4), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
; RV32-NEXT: li a2, 56
; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
@@ -4700,13 +4859,13 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: vor.vv v8, v16, v8, v0.t
; RV32-NEXT: vsrl.vi v16, v8, 2, v0.t
@@ -4724,18 +4883,18 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -4744,17 +4903,35 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
@@ -4764,7 +4941,7 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -4772,21 +4949,21 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
+; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -4810,8 +4987,8 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB70_2
@@ -4868,13 +5045,13 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV64-NEXT: sltu a0, a0, a7
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a7
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a7, vlenb
+; RV64-NEXT: slli a7, a7, 3
+; RV64-NEXT: add a7, sp, a7
+; RV64-NEXT: addi a7, a7, 16
+; RV64-NEXT: vl8r.v v8, (a7) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT: vor.vv v16, v8, v16, v0.t
; RV64-NEXT: vsrl.vi v8, v16, 2, v0.t
@@ -4917,10 +5094,14 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 32 * vlenb
-; RV32-NEXT: vmv8r.v v24, v16
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
; RV32-NEXT: sw a1, 44(sp)
@@ -4944,74 +5125,8 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB71_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 2
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 8
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 16
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsrl.vx v16, v8, a2
-; RV32-NEXT: vor.vv v8, v8, v16
-; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v16, v8, 1
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: addi a3, sp, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: li a1, 56
-; RV32-NEXT: vsrl.vx v8, v8, a1
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, a0, -16
-; RV32-NEXT: sltu a0, a0, a3
-; RV32-NEXT: addi a0, a0, -1
-; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v8, v24, 1
-; RV32-NEXT: vor.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v24, v8, 1
+; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 2
; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 4
@@ -5020,41 +5135,84 @@ define <32 x i64> @vp_ctlz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: vor.vv v8, v8, v24
; RV32-NEXT: vsrl.vi v24, v8, 16
; RV32-NEXT: vor.vv v8, v8, v24
+; RV32-NEXT: li a2, 32
; RV32-NEXT: vsrl.vx v24, v8, a2
; RV32-NEXT: vor.vv v8, v8, v24
-; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vsrl.vi v24, v8, 1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 24
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vnot.v v0, v8
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v24, v0, 1
; RV32-NEXT: vand.vv v24, v24, v16
-; RV32-NEXT: vsub.vv v8, v8, v24
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
+; RV32-NEXT: vsub.vv v24, v0, v24
+; RV32-NEXT: vand.vv v0, v24, v8
+; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vadd.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vadd.vv v24, v24, v0
+; RV32-NEXT: addi a3, sp, 48
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, a0, -16
+; RV32-NEXT: sltu a0, a0, a3
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v0, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v24, v0, 1
+; RV32-NEXT: vor.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 2
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 8
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vi v0, v24, 16
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vsrl.vx v0, v24, a2
+; RV32-NEXT: vor.vv v24, v24, v0
+; RV32-NEXT: vnot.v v24, v24
+; RV32-NEXT: vsrl.vi v0, v24, 1
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: addi a2, sp, 24
+; RV32-NEXT: vsub.vv v16, v24, v16
+; RV32-NEXT: vand.vv v24, v16, v8
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v8, v16, v8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a2), zero
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a2), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v0, v0, v16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a0, sp, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v16, v0, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v8, v24
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v8, v16, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v16, v24, a2
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
index 0b6d8b33394d..5fceab869ab8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll
@@ -1119,55 +1119,93 @@ declare <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64>, <15 x i1>, i32)
define <15 x i64> @vp_ctpop_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v15i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctpop_v15i64:
@@ -1228,34 +1266,29 @@ define <15 x i64> @vp_ctpop_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi a1, a1, 257
; RV32-NEXT: sw a1, 4(sp)
; RV32-NEXT: sw a1, 0(sp)
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1303,55 +1336,93 @@ declare <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64>, <16 x i1>, i32)
define <16 x i64> @vp_ctpop_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 24 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 24
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctpop_v16i64:
@@ -1412,34 +1483,29 @@ define <16 x i64> @vp_ctpop_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi a1, a1, 257
; RV32-NEXT: sw a1, 4(sp)
; RV32-NEXT: sw a1, 0(sp)
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1495,11 +1561,16 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 48 * vlenb
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 40
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
-; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: lui a1, 349525
@@ -1524,74 +1595,93 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB34_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT: addi a2, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: vlse64.v v8, (a2), zero
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 5
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: addi a2, sp, 32
+; RV32-NEXT: vlse64.v v16, (a2), zero
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v24, v8, v16, v0.t
-; RV32-NEXT: addi a2, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a2), zero
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 24
+; RV32-NEXT: li a3, 40
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v24, v8, v0.t
+; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: slli a2, a2, 5
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v16, v24, 2, v0.t
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v24, v24, v8, v0.t
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: li a3, 40
+; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v8, v16, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v16, v8, v0.t
-; RV32-NEXT: addi a2, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a2), zero
+; RV32-NEXT: vsub.vv v24, v8, v24, v0.t
+; RV32-NEXT: vand.vv v8, v24, v16, v0.t
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: li a3, 40
+; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
-; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vsrl.vi v8, v24, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 40
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: addi a2, sp, 24
+; RV32-NEXT: addi a3, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a2), zero
; RV32-NEXT: addi a2, sp, 48
; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 40
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v24, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
; RV32-NEXT: li a1, 56
; RV32-NEXT: vsrl.vx v8, v8, a1, v0.t
; RV32-NEXT: csrr a2, vlenb
@@ -1603,14 +1693,13 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sltu a0, a0, a2
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 40
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
@@ -1625,20 +1714,37 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a0, a0, a2
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: addi a0, sp, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT: csrr a0, vlenb
@@ -1666,8 +1772,8 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB34_2
@@ -1710,13 +1816,13 @@ define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: sltu a0, a0, a6
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a6
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a6, vlenb
+; RV64-NEXT: slli a6, a6, 3
+; RV64-NEXT: add a6, sp, a6
+; RV64-NEXT: addi a6, a6, 16
+; RV64-NEXT: vl8r.v v8, (a6) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT: vand.vx v16, v16, a1, v0.t
; RV64-NEXT: vsub.vv v16, v8, v16, v0.t
@@ -1746,12 +1852,11 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 40
-; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 40 * vlenb
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -1777,97 +1882,67 @@ define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB35_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a2, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a2), zero
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 24
-; RV32-NEXT: mul a2, a2, a3
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
-; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: vlse64.v v16, (a2), zero
; RV32-NEXT: addi a2, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a2), zero
+; RV32-NEXT: vlse64.v v24, (a2), zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v0
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v0, v0, v16
+; RV32-NEXT: vsub.vv v8, v8, v0
+; RV32-NEXT: vand.vv v0, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: addi a2, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a2), zero
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 4
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v16
-; RV32-NEXT: addi a2, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a2), zero
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v0, v8
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
; RV32-NEXT: addi a2, sp, 48
; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v16, v16, v8
-; RV32-NEXT: li a1, 56
-; RV32-NEXT: vsrl.vx v8, v16, a1
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a2, a2, 3
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 48
-; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; RV32-NEXT: addi a2, a0, -16
; RV32-NEXT: sltu a0, a0, a2
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vsrl.vi v16, v8, 1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 24
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: addi a2, sp, 24
; RV32-NEXT: vsub.vv v16, v8, v16
-; RV32-NEXT: vand.vv v8, v16, v0
+; RV32-NEXT: vand.vv v0, v16, v24
; RV32-NEXT: vsrl.vi v16, v16, 2
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a2), zero
+; RV32-NEXT: addi a2, sp, 16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vadd.vv v16, v0, v16
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v0, (a2), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 4
+; RV32-NEXT: vadd.vv v8, v16, v8
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v16, v16, v0
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v8, v0
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v8, v16, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v16, v24, a2
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a0, sp, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
-; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
index f2926fa91e5c..e7736e7f360f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-cttz-vp.ll
@@ -1263,59 +1263,86 @@ declare <15 x i64> @llvm.vp.cttz.v15i64(<15 x i64>, i1 immarg, <15 x i1>, i32)
define <15 x i64> @vp_cttz_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_cttz_v15i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: li a1, 1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v16, v8, a1, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_cttz_v15i64:
@@ -1385,33 +1412,29 @@ define <15 x i64> @vp_cttz_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vsub.vx v16, v8, a1
; RV32-NEXT: vnot.v v8, v8
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1463,59 +1486,86 @@ declare <16 x i64> @llvm.vp.cttz.v16i64(<16 x i64>, i1 immarg, <16 x i1>, i32)
define <16 x i64> @vp_cttz_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_cttz_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: li a1, 1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v16, v8, a1, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_cttz_v16i64:
@@ -1585,33 +1635,29 @@ define <16 x i64> @vp_cttz_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: vsub.vx v16, v8, a1
; RV32-NEXT: vnot.v v8, v8
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -1671,7 +1717,7 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 56 * vlenb
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -1705,111 +1751,145 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vnot.v v8, v8, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
+; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
+; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, sp, 24
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a3, a3, a5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v8, (a4), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
; RV32-NEXT: li a2, 56
; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
@@ -1817,13 +1897,13 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v8, v16, a1, v0.t
; RV32-NEXT: vnot.v v16, v16, v0.t
; RV32-NEXT: vand.vv v8, v16, v8, v0.t
@@ -1831,18 +1911,18 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -1851,17 +1931,35 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
@@ -1871,7 +1969,7 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -1879,21 +1977,21 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
+; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -1917,8 +2015,8 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a1, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a2, a0
; RV64-NEXT: bltu a0, a1, .LBB34_2
@@ -1965,13 +2063,13 @@ define <32 x i64> @vp_cttz_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: sltu a0, a0, a7
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a7
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a7, vlenb
+; RV64-NEXT: slli a7, a7, 3
+; RV64-NEXT: add a7, sp, a7
+; RV64-NEXT: addi a7, a7, 16
+; RV64-NEXT: vl8r.v v8, (a7) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsub.vx v16, v8, a1, v0.t
; RV64-NEXT: vnot.v v8, v8, v0.t
; RV64-NEXT: vand.vv v8, v8, v16, v0.t
@@ -2004,10 +2102,14 @@ define <32 x i64> @vp_cttz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 32 * vlenb
-; RV32-NEXT: vmv8r.v v24, v16
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
; RV32-NEXT: sw a1, 44(sp)
@@ -2032,96 +2134,73 @@ define <32 x i64> @vp_cttz_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-NEXT: .LBB35_2:
; RV32-NEXT: li a2, 1
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v16, v8, a2
+; RV32-NEXT: vsub.vx v24, v8, a2
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: vand.vv v0, v8, v24
; RV32-NEXT: addi a3, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: vsrl.vi v24, v0, 1
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vsub.vv v24, v0, v24
+; RV32-NEXT: vand.vv v0, v24, v8
+; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vadd.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vadd.vv v24, v24, v0
; RV32-NEXT: addi a3, sp, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: li a1, 56
-; RV32-NEXT: vsrl.vx v8, v8, a1
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, a0, -16
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v0, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsub.vx v24, v0, a2
+; RV32-NEXT: vnot.v v0, v0
+; RV32-NEXT: vand.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 1
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: addi a2, sp, 24
+; RV32-NEXT: vsub.vv v16, v24, v16
+; RV32-NEXT: vand.vv v24, v16, v8
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v8, v16, v8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a2), zero
+; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v8, v24, a2
-; RV32-NEXT: vnot.v v24, v24
-; RV32-NEXT: vand.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 24
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v24, v24, v16
-; RV32-NEXT: vsub.vv v8, v8, v24
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a2), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v0, v0, v16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a0, sp, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v16, v0, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v8, v24
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v8, v16, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v16, v24, a2
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
@@ -3420,59 +3499,86 @@ define <8 x i64> @vp_cttz_zero_undef_v8i64_unmasked(<8 x i64> %va, i32 zeroext %
define <15 x i64> @vp_cttz_zero_undef_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_cttz_zero_undef_v15i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: li a1, 1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v16, v8, a1, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_cttz_zero_undef_v15i64:
@@ -3542,33 +3648,29 @@ define <15 x i64> @vp_cttz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
; RV32-NEXT: vsub.vx v16, v8, a1
; RV32-NEXT: vnot.v v8, v8
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -3618,59 +3720,86 @@ define <15 x i64> @vp_cttz_zero_undef_v15i64_unmasked(<15 x i64> %va, i32 zeroex
define <16 x i64> @vp_cttz_zero_undef_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_cttz_zero_undef_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -32
-; RV32-NEXT: .cfi_def_cfa_offset 32
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 4
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
-; RV32-NEXT: sw a1, 28(sp)
-; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: sw a1, 44(sp)
+; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
-; RV32-NEXT: sw a1, 20(sp)
-; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: sw a1, 36(sp)
+; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
-; RV32-NEXT: sw a1, 12(sp)
-; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
-; RV32-NEXT: sw a1, 4(sp)
-; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: li a1, 1
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v16, v8, a1, v0.t
; RV32-NEXT: vnot.v v8, v8, v0.t
-; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
-; RV32-NEXT: addi a1, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v24, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24, v0.t
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 32
; RV32-NEXT: vlse64.v v16, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v24, 1, v0.t
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v8, v24, v0.t
+; RV32-NEXT: addi a1, sp, 48
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v24, v8, v0.t
; RV32-NEXT: vand.vv v24, v8, v16, v0.t
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: addi a1, sp, 8
+; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
-; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vmul.vv v8, v8, v24, v0.t
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
-; RV32-NEXT: addi sp, sp, 32
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_cttz_zero_undef_v16i64:
@@ -3740,33 +3869,29 @@ define <16 x i64> @vp_cttz_zero_undef_v16i64_unmasked(<16 x i64> %va, i32 zeroex
; RV32-NEXT: vsub.vx v16, v8, a1
; RV32-NEXT: vnot.v v8, v8
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a1, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: addi a1, sp, 16
; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsrl.vi v0, v8, 1
+; RV32-NEXT: vand.vv v16, v0, v16
; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vand.vv v16, v8, v24
; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: vand.vv v8, v8, v24
+; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: addi a1, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a1), zero
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: mv a1, sp
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a1), zero
+; RV32-NEXT: vlse64.v v24, (a1), zero
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vmul.vv v8, v8, v24
; RV32-NEXT: li a0, 56
; RV32-NEXT: vsrl.vx v8, v8, a0
; RV32-NEXT: addi sp, sp, 32
@@ -3824,7 +3949,7 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 56 * vlenb
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
@@ -3858,111 +3983,145 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vnot.v v8, v8, v0.t
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 40
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 48
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 40
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v8, v16, 1, v0.t
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
+; RV32-NEXT: li a4, 40
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
+; RV32-NEXT: li a4, 24
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
-; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v8, (a3), zero
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 48
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: li a4, 48
+; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
-; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
-; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, sp, 24
+; RV32-NEXT: addi a4, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 40
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: li a5, 24
+; RV32-NEXT: mul a3, a3, a5
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vlse64.v v8, (a4), zero
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 5
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v16, v8, v0.t
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
; RV32-NEXT: csrr a3, vlenb
; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a3, 24
+; RV32-NEXT: mul a2, a2, a3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 48
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vmul.vv v8, v16, v8, v0.t
; RV32-NEXT: li a2, 56
; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: slli a3, a3, 3
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 48
; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
@@ -3970,13 +4129,13 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 4
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v16, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsub.vx v8, v16, a1, v0.t
; RV32-NEXT: vnot.v v16, v16, v0.t
; RV32-NEXT: vand.vv v8, v16, v8, v0.t
@@ -3984,18 +4143,18 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 24
+; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -4004,17 +4163,35 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v16, v8, v16, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a1, 40
+; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 48
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 48
@@ -4024,7 +4201,7 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
@@ -4032,21 +4209,21 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 40
+; RV32-NEXT: li a1, 24
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
+; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: vsrl.vx v16, v8, a2, v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -4070,8 +4247,8 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a1, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a2, a0
; RV64-NEXT: bltu a0, a1, .LBB70_2
@@ -4118,13 +4295,13 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64(<32 x i64> %va, <32 x i1> %m, i32 z
; RV64-NEXT: sltu a0, a0, a7
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a7
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
-; RV64-NEXT: add a0, sp, a0
-; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a7, vlenb
+; RV64-NEXT: slli a7, a7, 3
+; RV64-NEXT: add a7, sp, a7
+; RV64-NEXT: addi a7, a7, 16
+; RV64-NEXT: vl8r.v v8, (a7) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsub.vx v16, v8, a1, v0.t
; RV64-NEXT: vnot.v v8, v8, v0.t
; RV64-NEXT: vand.vv v8, v8, v16, v0.t
@@ -4157,10 +4334,14 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a1, a1, 5
+; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 32 * vlenb
-; RV32-NEXT: vmv8r.v v24, v16
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 16 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a1, a1, 3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 48
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
; RV32-NEXT: sw a1, 44(sp)
@@ -4185,96 +4366,73 @@ define <32 x i64> @vp_cttz_zero_undef_v32i64_unmasked(<32 x i64> %va, i32 zeroex
; RV32-NEXT: .LBB71_2:
; RV32-NEXT: li a2, 1
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v16, v8, a2
+; RV32-NEXT: vsub.vx v24, v8, a2
; RV32-NEXT: vnot.v v8, v8
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: vand.vv v0, v8, v24
; RV32-NEXT: addi a3, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 24
-; RV32-NEXT: mul a3, a3, a4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v0, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v16, v0
-; RV32-NEXT: vsub.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 32
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v0, (a3), zero
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v16, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
-; RV32-NEXT: vadd.vv v8, v16, v8
-; RV32-NEXT: vsrl.vi v16, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 24
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a3), zero
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 4
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: addi a3, sp, 32
+; RV32-NEXT: vlse64.v v8, (a3), zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a3, sp, 16
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vlse64.v v16, (a3), zero
+; RV32-NEXT: vsrl.vi v24, v0, 1
+; RV32-NEXT: vand.vv v24, v24, v16
+; RV32-NEXT: vsub.vv v24, v0, v24
+; RV32-NEXT: vand.vv v0, v24, v8
+; RV32-NEXT: vsrl.vi v24, v24, 2
+; RV32-NEXT: vand.vv v24, v24, v8
+; RV32-NEXT: vadd.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 4
+; RV32-NEXT: vadd.vv v24, v24, v0
; RV32-NEXT: addi a3, sp, 48
-; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: li a1, 56
-; RV32-NEXT: vsrl.vx v8, v8, a1
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a3, a3, 3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 48
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; RV32-NEXT: addi a3, a0, -16
; RV32-NEXT: sltu a0, a0, a3
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a3
+; RV32-NEXT: csrr a3, vlenb
+; RV32-NEXT: slli a3, a3, 3
+; RV32-NEXT: add a3, sp, a3
+; RV32-NEXT: addi a3, a3, 48
+; RV32-NEXT: vl8r.v v0, (a3) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsub.vx v24, v0, a2
+; RV32-NEXT: vnot.v v0, v0
+; RV32-NEXT: vand.vv v24, v0, v24
+; RV32-NEXT: vsrl.vi v0, v24, 1
+; RV32-NEXT: vand.vv v16, v0, v16
+; RV32-NEXT: addi a2, sp, 24
+; RV32-NEXT: vsub.vv v16, v24, v16
+; RV32-NEXT: vand.vv v24, v16, v8
+; RV32-NEXT: vsrl.vi v16, v16, 2
+; RV32-NEXT: vand.vv v8, v16, v8
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v16, (a2), zero
+; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV32-NEXT: vsub.vx v8, v24, a2
-; RV32-NEXT: vnot.v v24, v24
-; RV32-NEXT: vand.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a2, 24
-; RV32-NEXT: mul a0, a0, a2
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vand.vv v24, v24, v16
-; RV32-NEXT: vsub.vv v8, v8, v24
-; RV32-NEXT: vand.vv v24, v8, v0
-; RV32-NEXT: vsrl.vi v8, v8, 2
-; RV32-NEXT: vand.vv v8, v8, v0
; RV32-NEXT: vadd.vv v8, v24, v8
-; RV32-NEXT: vsrl.vi v24, v8, 4
-; RV32-NEXT: vadd.vv v8, v8, v24
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vlse64.v v24, (a2), zero
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v0, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v0
+; RV32-NEXT: addi a2, sp, 48
+; RV32-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v0, v0, v16
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16
-; RV32-NEXT: addi a0, sp, 48
-; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmul.vv v8, v8, v16
-; RV32-NEXT: vsrl.vx v16, v8, a1
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 48
-; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v16, v0, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v24, v8, v24
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v8, v16, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v16, v24, a2
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
index 1587f770f87c..9f8de22b25c2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
@@ -15,16 +15,16 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_load_v16i1_v32i1(ptr %p) {
; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vadd.vv v11, v9, v9
-; CHECK-NEXT: vrgather.vv v9, v10, v11
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vrgather.vv v9, v10, v11
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vadd.vi v12, v11, -16
; CHECK-NEXT: li a0, -256
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vadd.vi v12, v11, -16
; CHECK-NEXT: vrgather.vv v9, v8, v12, v0.t
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vadd.vi v12, v11, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
index dccb62877af3..386c71cf665c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-i1.ll
@@ -326,9 +326,9 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV32-NEXT: andi sp, sp, -128
; RV32-NEXT: andi a1, a1, 255
; RV32-NEXT: li a2, 128
+; RV32-NEXT: addi a3, a0, 128
; RV32-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV32-NEXT: addi a2, a0, 128
-; RV32-NEXT: vle8.v v16, (a2)
+; RV32-NEXT: vle8.v v16, (a3)
; RV32-NEXT: vle8.v v24, (a0)
; RV32-NEXT: mv a0, sp
; RV32-NEXT: add a1, a0, a1
@@ -357,9 +357,9 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV64-NEXT: andi sp, sp, -128
; RV64-NEXT: andi a1, a1, 255
; RV64-NEXT: li a2, 128
+; RV64-NEXT: addi a3, a0, 128
; RV64-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64-NEXT: addi a2, a0, 128
-; RV64-NEXT: vle8.v v16, (a2)
+; RV64-NEXT: vle8.v v16, (a3)
; RV64-NEXT: vle8.v v24, (a0)
; RV64-NEXT: mv a0, sp
; RV64-NEXT: add a1, a0, a1
@@ -388,9 +388,9 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV32ZBS-NEXT: andi sp, sp, -128
; RV32ZBS-NEXT: andi a1, a1, 255
; RV32ZBS-NEXT: li a2, 128
+; RV32ZBS-NEXT: addi a3, a0, 128
; RV32ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV32ZBS-NEXT: addi a2, a0, 128
-; RV32ZBS-NEXT: vle8.v v16, (a2)
+; RV32ZBS-NEXT: vle8.v v16, (a3)
; RV32ZBS-NEXT: vle8.v v24, (a0)
; RV32ZBS-NEXT: mv a0, sp
; RV32ZBS-NEXT: add a1, a0, a1
@@ -419,9 +419,9 @@ define i1 @extractelt_v256i1(ptr %x, i64 %idx) nounwind {
; RV64ZBS-NEXT: andi sp, sp, -128
; RV64ZBS-NEXT: andi a1, a1, 255
; RV64ZBS-NEXT: li a2, 128
+; RV64ZBS-NEXT: addi a3, a0, 128
; RV64ZBS-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; RV64ZBS-NEXT: addi a2, a0, 128
-; RV64ZBS-NEXT: vle8.v v16, (a2)
+; RV64ZBS-NEXT: vle8.v v16, (a3)
; RV64ZBS-NEXT: vle8.v v24, (a0)
; RV64ZBS-NEXT: mv a0, sp
; RV64ZBS-NEXT: add a1, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
index b9c611bf3e54..33cd00c9f6af 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract-subvector.ll
@@ -73,7 +73,6 @@ define void @extract_v1i32_v8i32_4(ptr %x, ptr %y) {
; VLA: # %bb.0:
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT: vle32.v v8, (a0)
-; VLA-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; VLA-NEXT: vslidedown.vi v8, v8, 4
; VLA-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; VLA-NEXT: vse32.v v8, (a1)
@@ -96,7 +95,6 @@ define void @extract_v1i32_v8i32_5(ptr %x, ptr %y) {
; VLA: # %bb.0:
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; VLA-NEXT: vle32.v v8, (a0)
-; VLA-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; VLA-NEXT: vslidedown.vi v8, v8, 5
; VLA-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; VLA-NEXT: vse32.v v8, (a1)
@@ -391,9 +389,8 @@ define void @extract_v8i1_v64i1_8(ptr %x, ptr %y) {
; VLA-NEXT: li a2, 64
; VLA-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; VLA-NEXT: vlm.v v8, (a0)
-; VLA-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; VLA-NEXT: vslidedown.vi v8, v8, 1
; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 1
; VLA-NEXT: vsm.v v8, (a1)
; VLA-NEXT: ret
;
@@ -401,9 +398,8 @@ define void @extract_v8i1_v64i1_8(ptr %x, ptr %y) {
; VLS: # %bb.0:
; VLS-NEXT: vsetvli a2, zero, e8, m4, ta, ma
; VLS-NEXT: vlm.v v8, (a0)
-; VLS-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; VLS-NEXT: vslidedown.vi v8, v8, 1
; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v8, 1
; VLS-NEXT: vsm.v v8, (a1)
; VLS-NEXT: ret
%a = load <64 x i1>, ptr %x
@@ -418,9 +414,8 @@ define void @extract_v8i1_v64i1_48(ptr %x, ptr %y) {
; VLA-NEXT: li a2, 64
; VLA-NEXT: vsetvli zero, a2, e8, m4, ta, ma
; VLA-NEXT: vlm.v v8, (a0)
-; VLA-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; VLA-NEXT: vslidedown.vi v8, v8, 6
; VLA-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLA-NEXT: vslidedown.vi v8, v8, 6
; VLA-NEXT: vsm.v v8, (a1)
; VLA-NEXT: ret
;
@@ -428,9 +423,8 @@ define void @extract_v8i1_v64i1_48(ptr %x, ptr %y) {
; VLS: # %bb.0:
; VLS-NEXT: vsetvli a2, zero, e8, m4, ta, ma
; VLS-NEXT: vlm.v v8, (a0)
-; VLS-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; VLS-NEXT: vslidedown.vi v8, v8, 6
; VLS-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; VLS-NEXT: vslidedown.vi v8, v8, 6
; VLS-NEXT: vsm.v v8, (a1)
; VLS-NEXT: ret
%a = load <64 x i1>, ptr %x
@@ -853,9 +847,8 @@ define void @extract_v2i1_nxv32i1_26(<vscale x 32 x i1> %x, ptr %y) {
define void @extract_v8i1_nxv32i1_16(<vscale x 32 x i1> %x, ptr %y) {
; CHECK-LABEL: extract_v8i1_nxv32i1_16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v0, 2
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v0, 2
; CHECK-NEXT: vsm.v v8, (a0)
; CHECK-NEXT: ret
%c = call <8 x i1> @llvm.vector.extract.v8i1.nxv32i1(<vscale x 32 x i1> %x, i64 16)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
index e969da6fd45b..d309da6df7dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extract.ll
@@ -138,7 +138,6 @@ define i32 @extractelt_v8i32(ptr %x) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 6
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -152,9 +151,9 @@ define i64 @extractelt_v4i64(ptr %x) nounwind {
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vle64.v v8, (a0)
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vsrl.vx v10, v8, a0
; RV32-NEXT: vmv.x.s a1, v10
; RV32-NEXT: vmv.x.s a0, v8
@@ -164,7 +163,6 @@ define i64 @extractelt_v4i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
@@ -233,7 +231,6 @@ define i64 @extractelt_v3i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 3, e64, m2, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 2
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
@@ -452,7 +449,6 @@ define i8 @extractelt_v32i8_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK-NEXT: li a2, 32
; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -466,7 +462,6 @@ define i16 @extractelt_v16i16_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -481,7 +476,6 @@ define i32 @extractelt_v8i32_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vadd.vv v8, v8, v8
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -497,10 +491,10 @@ define i64 @extractelt_v4i64_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vle64.v v8, (a0)
; RV32-NEXT: vadd.vv v8, v8, v8
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a0, v8
; RV32-NEXT: li a1, 32
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a1
; RV32-NEXT: vmv.x.s a1, v8
; RV32-NEXT: ret
@@ -510,7 +504,6 @@ define i64 @extractelt_v4i64_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vadd.vv v8, v8, v8
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vx v8, v8, a1
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
@@ -526,7 +519,6 @@ define half @extractelt_v16f16_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
; CHECK-NEXT: vfadd.vv v8, v8, v8
-; CHECK-NEXT: vsetivli zero, 1, e16, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -542,7 +534,6 @@ define float @extractelt_v8f32_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vfadd.vv v8, v8, v8
-; CHECK-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -558,7 +549,6 @@ define double @extractelt_v4f64_idx(ptr %x, i32 zeroext %idx) nounwind {
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vfadd.vv v8, v8, v8
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a1
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
@@ -594,7 +584,6 @@ define i64 @extractelt_v3i64_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vadd.vv v8, v8, v8
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vx v8, v8, a1
; RV64-NEXT: vmv.x.s a0, v8
; RV64-NEXT: ret
@@ -721,9 +710,9 @@ define i32 @extractelt_v64i32_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV32-NEXT: andi a1, a1, 63
; RV32-NEXT: slli a1, a1, 2
; RV32-NEXT: li a2, 32
+; RV32-NEXT: addi a3, a0, 128
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT: addi a2, a0, 128
-; RV32-NEXT: vle32.v v8, (a2)
+; RV32-NEXT: vle32.v v8, (a3)
; RV32-NEXT: vle32.v v16, (a0)
; RV32-NEXT: mv a0, sp
; RV32-NEXT: add a1, a0, a1
@@ -749,9 +738,9 @@ define i32 @extractelt_v64i32_idx(ptr %x, i32 zeroext %idx) nounwind {
; RV64-NEXT: andi a1, a1, 63
; RV64-NEXT: slli a1, a1, 2
; RV64-NEXT: li a2, 32
+; RV64-NEXT: addi a3, a0, 128
; RV64-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; RV64-NEXT: addi a2, a0, 128
-; RV64-NEXT: vle32.v v8, (a2)
+; RV64-NEXT: vle32.v v8, (a3)
; RV64-NEXT: vle32.v v16, (a0)
; RV64-NEXT: mv a0, sp
; RV64-NEXT: add a1, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
index 287dd510674d..c1b4c5fda6c6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-floor-vp.ll
@@ -204,8 +204,8 @@ define <8 x half> @vp_floor_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 2
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <16 x half> @llvm.vp.floor.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_floor_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 2
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <16 x half> @vp_floor_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 2
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -439,8 +439,8 @@ define <8 x float> @vp_floor_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -483,8 +483,8 @@ define <16 x float> @vp_floor_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -561,16 +561,16 @@ declare <4 x double> @llvm.vp.floor.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_floor_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -605,16 +605,16 @@ declare <8 x double> @llvm.vp.floor.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_floor_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -649,16 +649,16 @@ declare <15 x double> @llvm.vp.floor.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_floor_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -693,16 +693,16 @@ declare <16 x double> @llvm.vp.floor.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_floor_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -737,69 +737,59 @@ declare <32 x double> @llvm.vp.floor.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_floor_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a1, 2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
index edb33158e32e..51eb63f5f922 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fmaximum-vp.ll
@@ -177,8 +177,8 @@ define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
@@ -253,8 +253,8 @@ define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
@@ -608,7 +608,6 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
@@ -618,28 +617,28 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB24_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v26, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a0, vlenb
@@ -666,13 +665,13 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
@@ -759,9 +758,9 @@ define <32 x double> @vfmax_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
index 48649c43f782..03e0ac42c442 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fminimum-vp.ll
@@ -177,8 +177,8 @@ define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
@@ -253,8 +253,8 @@ define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1>
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
@@ -608,7 +608,6 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
@@ -618,28 +617,28 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB24_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB24_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v26, v8, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a0, vlenb
@@ -666,13 +665,13 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
@@ -759,9 +758,9 @@ define <32 x double> @vfmin_vv_v32f64_unmasked(<32 x double> %va, <32 x double>
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
index 9e83efd35195..379a51f4eee3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-buildvec.ll
@@ -39,9 +39,9 @@ define <4 x float> @hang_when_merging_stores_after_legalization(<8 x float> %x,
; CHECK-NEXT: vmul.vx v14, v12, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vrgatherei16.vv v12, v8, v14
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vadd.vi v8, v14, -14
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmv.v.i v0, 12
+; CHECK-NEXT: vadd.vi v8, v14, -14
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vrgatherei16.vv v12, v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v12
@@ -1407,8 +1407,8 @@ define <8 x float> @buildvec_v8f32_zvl256(float %e0, float %e1, float %e2, float
; CHECK-NEXT: vfmv.v.f v8, fa4
; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: vfslide1down.vf v8, v8, fa6
-; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: ret
%v0 = insertelement <8 x float> poison, float %e0, i64 0
@@ -1458,8 +1458,8 @@ define <8 x double> @buildvec_v8f64_zvl512(double %e0, double %e1, double %e2, d
; CHECK-NEXT: vfmv.v.f v8, fa4
; CHECK-NEXT: vfslide1down.vf v8, v8, fa5
; CHECK-NEXT: vfslide1down.vf v8, v8, fa6
-; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vfslide1down.vf v8, v8, fa7
; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: ret
%v0 = insertelement <8 x double> poison, double %e0, i64 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
index ed152e64a91e..f3b124aa34dc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
@@ -56,9 +56,9 @@ define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
; RV32-V512-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
; RV32-V512-NEXT: vid.v v10
; RV32-V512-NEXT: vsrl.vi v11, v10, 1
+; RV32-V512-NEXT: vmv.v.i v0, 10
; RV32-V512-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV32-V512-NEXT: vrgatherei16.vv v10, v8, v11
-; RV32-V512-NEXT: vmv.v.i v0, 10
; RV32-V512-NEXT: vrgatherei16.vv v10, v9, v11, v0.t
; RV32-V512-NEXT: vmv.v.v v8, v10
; RV32-V512-NEXT: ret
@@ -68,8 +68,8 @@ define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
; RV64-V512-NEXT: vsetivli zero, 4, e64, m1, ta, mu
; RV64-V512-NEXT: vid.v v10
; RV64-V512-NEXT: vsrl.vi v11, v10, 1
-; RV64-V512-NEXT: vrgather.vv v10, v8, v11
; RV64-V512-NEXT: vmv.v.i v0, 10
+; RV64-V512-NEXT: vrgather.vv v10, v8, v11
; RV64-V512-NEXT: vrgather.vv v10, v9, v11, v0.t
; RV64-V512-NEXT: vmv.v.v v8, v10
; RV64-V512-NEXT: ret
@@ -261,13 +261,13 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
; V128-NEXT: vwmaccu.vx v8, a0, v16
; V128-NEXT: lui a1, 699051
; V128-NEXT: addi a1, a1, -1366
-; V128-NEXT: li a2, 32
; V128-NEXT: vmv.s.x v0, a1
-; V128-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; V128-NEXT: li a1, 32
+; V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; V128-NEXT: vmerge.vvm v24, v8, v24, v0
-; V128-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT: addi a1, sp, 16
; V128-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; V128-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT: vwaddu.vv v0, v16, v8
; V128-NEXT: vwmaccu.vx v0, a0, v8
; V128-NEXT: vmv8r.v v8, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
index 8dc32d13e4a3..45c0a22b1939 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-shuffles.ll
@@ -5,9 +5,8 @@
define <4 x half> @shuffle_v4f16(<4 x half> %x, <4 x half> %y) {
; CHECK-LABEL: shuffle_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 11
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 11
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x half> %x, <4 x half> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
@@ -93,12 +92,11 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y)
; CHECK: # %bb.0:
; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v14, (a0)
-; CHECK-NEXT: vrgatherei16.vv v12, v8, v14
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v0, 8
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
+; CHECK-NEXT: vrgatherei16.vv v12, v8, v14
; CHECK-NEXT: vrgather.vi v12, v10, 1, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
@@ -109,13 +107,13 @@ define <4 x double> @vrgather_shuffle_vv_v4f64(<4 x double> %x, <4 x double> %y)
define <4 x double> @vrgather_shuffle_xv_v4f64(<4 x double> %x) {
; CHECK-LABEL: vrgather_shuffle_xv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
-; CHECK-NEXT: vid.v v12
; CHECK-NEXT: lui a0, %hi(.LCPI7_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI7_0)
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vlse64.v v10, (a0), zero
-; CHECK-NEXT: vrsub.vi v12, v12, 4
+; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vmv.v.i v0, 12
+; CHECK-NEXT: vrsub.vi v12, v12, 4
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vrgatherei16.vv v10, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
@@ -129,12 +127,12 @@ define <4 x double> @vrgather_shuffle_vx_v4f64(<4 x double> %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vid.v v12
+; CHECK-NEXT: lui a0, %hi(.LCPI8_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI8_0)
+; CHECK-NEXT: vlse64.v v10, (a0), zero
; CHECK-NEXT: li a0, 3
-; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI8_0)
-; CHECK-NEXT: vlse64.v v10, (a1), zero
-; CHECK-NEXT: vmul.vx v12, v12, a0
; CHECK-NEXT: vmv.v.i v0, 3
+; CHECK-NEXT: vmul.vx v12, v12, a0
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vrgatherei16.vv v10, v8, v12, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
index 0f003d7af610..d25312268ada 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp.ll
@@ -1199,7 +1199,7 @@ declare <4 x half> @llvm.copysign.v4f16(<4 x half>, <4 x half>)
define void @copysign_neg_trunc_v3f16_v3f32(ptr %x, ptr %y) {
; ZVFH-LABEL: copysign_neg_trunc_v3f16_v3f32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vsetivli zero, 3, e16, mf2, ta, ma
+; ZVFH-NEXT: vsetivli zero, 3, e32, m1, ta, ma
; ZVFH-NEXT: vle32.v v8, (a1)
; ZVFH-NEXT: vle16.v v9, (a0)
; ZVFH-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
index 6320b07125bb..bc46e7d264bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp2i-sat.ll
@@ -351,25 +351,23 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: fmin.d fa3, fa3, fa4
; RV32-NEXT: fcvt.w.d a2, fa3, rtz
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa3, v8
-; RV32-NEXT: feq.d a0, fa3, fa3
+; RV32-NEXT: feq.d a2, fa3, fa3
; RV32-NEXT: fmax.d fa3, fa3, fa5
; RV32-NEXT: fmin.d fa3, fa3, fa4
-; RV32-NEXT: fcvt.w.d a2, fa3, rtz
+; RV32-NEXT: fcvt.w.d a3, fa3, rtz
; RV32-NEXT: fld fa3, 40(sp)
-; RV32-NEXT: neg a0, a0
-; RV32-NEXT: and a0, a0, a2
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV32-NEXT: vslide1down.vx v8, v10, a0
+; RV32-NEXT: neg a0, a2
+; RV32-NEXT: and a0, a0, a3
; RV32-NEXT: feq.d a2, fa3, fa3
; RV32-NEXT: fmax.d fa3, fa3, fa5
; RV32-NEXT: fmin.d fa3, fa3, fa4
; RV32-NEXT: fcvt.w.d a3, fa3, rtz
; RV32-NEXT: fld fa3, 32(sp)
-; RV32-NEXT: vslide1down.vx v8, v10, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: neg a0, a2
; RV32-NEXT: and a0, a0, a3
; RV32-NEXT: feq.d a2, fa3, fa3
@@ -395,8 +393,8 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: fmin.d fa5, fa5, fa4
; RV32-NEXT: fcvt.w.d a2, fa5, rtz
; RV32-NEXT: and a0, a0, a2
-; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
; RV32-NEXT: vse8.v v9, (a1)
; RV32-NEXT: addi sp, s0, -128
@@ -452,25 +450,23 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: fmin.d fa3, fa3, fa4
; RV64-NEXT: fcvt.l.d a2, fa3, rtz
; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa3, v8
-; RV64-NEXT: feq.d a0, fa3, fa3
+; RV64-NEXT: feq.d a2, fa3, fa3
; RV64-NEXT: fmax.d fa3, fa3, fa5
; RV64-NEXT: fmin.d fa3, fa3, fa4
-; RV64-NEXT: fcvt.l.d a2, fa3, rtz
+; RV64-NEXT: fcvt.l.d a3, fa3, rtz
; RV64-NEXT: fld fa3, 40(sp)
-; RV64-NEXT: neg a0, a0
-; RV64-NEXT: and a0, a0, a2
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; RV64-NEXT: vslide1down.vx v8, v10, a0
+; RV64-NEXT: neg a0, a2
+; RV64-NEXT: and a0, a0, a3
; RV64-NEXT: feq.d a2, fa3, fa3
; RV64-NEXT: fmax.d fa3, fa3, fa5
; RV64-NEXT: fmin.d fa3, fa3, fa4
; RV64-NEXT: fcvt.l.d a3, fa3, rtz
; RV64-NEXT: fld fa3, 32(sp)
-; RV64-NEXT: vslide1down.vx v8, v10, a0
+; RV64-NEXT: vslide1down.vx v8, v8, a0
; RV64-NEXT: neg a0, a2
; RV64-NEXT: and a0, a0, a3
; RV64-NEXT: feq.d a2, fa3, fa3
@@ -496,8 +492,8 @@ define void @fp2si_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: fmin.d fa5, fa5, fa4
; RV64-NEXT: fcvt.l.d a2, fa5, rtz
; RV64-NEXT: and a0, a0, a2
-; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
; RV64-NEXT: vse8.v v9, (a1)
; RV64-NEXT: addi sp, s0, -128
@@ -542,46 +538,43 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fcvt.wu.d a2, fa4, rtz
-; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: vmv.v.x v10, a2
-; RV32-NEXT: vslide1down.vx v10, v10, a0
; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 2
-; RV32-NEXT: vfmv.f.s fa4, v12
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vfmv.f.s fa4, v10
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
-; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
-; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: fcvt.wu.d a3, fa4, rtz
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa4, v8
-; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fld fa2, 40(sp)
-; RV32-NEXT: fmin.d fa4, fa4, fa5
-; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
-; RV32-NEXT: fld fa4, 32(sp)
-; RV32-NEXT: fmax.d fa2, fa2, fa3
-; RV32-NEXT: fmin.d fa2, fa2, fa5
-; RV32-NEXT: fcvt.wu.d a2, fa2, rtz
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
-; RV32-NEXT: fld fa2, 48(sp)
-; RV32-NEXT: fcvt.wu.d a3, fa4, rtz
+; RV32-NEXT: fcvt.wu.d a4, fa4, rtz
+; RV32-NEXT: fmax.d fa4, fa2, fa3
+; RV32-NEXT: fld fa2, 32(sp)
+; RV32-NEXT: fmin.d fa4, fa4, fa5
+; RV32-NEXT: fcvt.wu.d a5, fa4, rtz
; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; RV32-NEXT: vslide1down.vx v8, v10, a0
+; RV32-NEXT: vmv.v.x v8, a2
; RV32-NEXT: fmax.d fa4, fa2, fa3
; RV32-NEXT: fmin.d fa4, fa4, fa5
+; RV32-NEXT: fcvt.wu.d a2, fa4, rtz
+; RV32-NEXT: fld fa4, 48(sp)
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a3
+; RV32-NEXT: vslide1down.vx v8, v8, a4
+; RV32-NEXT: fmax.d fa4, fa4, fa3
+; RV32-NEXT: fmin.d fa4, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa4, rtz
; RV32-NEXT: fld fa4, 56(sp)
-; RV32-NEXT: vmv.v.x v9, a3
-; RV32-NEXT: vslide1down.vx v9, v9, a2
+; RV32-NEXT: vmv.v.x v9, a2
+; RV32-NEXT: vslide1down.vx v9, v9, a5
; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: fmax.d fa4, fa4, fa3
; RV32-NEXT: fmin.d fa5, fa4, fa5
; RV32-NEXT: fcvt.wu.d a0, fa5, rtz
-; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vslidedown.vi v9, v8, 4, v0.t
; RV32-NEXT: vse8.v v9, (a1)
; RV32-NEXT: addi sp, s0, -128
@@ -618,46 +611,43 @@ define void @fp2ui_v8f64_v8i8(ptr %x, ptr %y) {
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fcvt.lu.d a2, fa4, rtz
-; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: vmv.v.x v10, a2
-; RV64-NEXT: vslide1down.vx v10, v10, a0
; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-NEXT: vslidedown.vi v12, v8, 2
-; RV64-NEXT: vfmv.f.s fa4, v12
+; RV64-NEXT: vslidedown.vi v10, v8, 2
+; RV64-NEXT: vfmv.f.s fa4, v10
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
-; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
-; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-NEXT: fcvt.lu.d a3, fa4, rtz
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa4, v8
-; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fld fa2, 40(sp)
-; RV64-NEXT: fmin.d fa4, fa4, fa5
-; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
-; RV64-NEXT: fld fa4, 32(sp)
-; RV64-NEXT: fmax.d fa2, fa2, fa3
-; RV64-NEXT: fmin.d fa2, fa2, fa5
-; RV64-NEXT: fcvt.lu.d a2, fa2, rtz
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
-; RV64-NEXT: fld fa2, 48(sp)
-; RV64-NEXT: fcvt.lu.d a3, fa4, rtz
+; RV64-NEXT: fcvt.lu.d a4, fa4, rtz
+; RV64-NEXT: fmax.d fa4, fa2, fa3
+; RV64-NEXT: fld fa2, 32(sp)
+; RV64-NEXT: fmin.d fa4, fa4, fa5
+; RV64-NEXT: fcvt.lu.d a5, fa4, rtz
; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; RV64-NEXT: vslide1down.vx v8, v10, a0
+; RV64-NEXT: vmv.v.x v8, a2
; RV64-NEXT: fmax.d fa4, fa2, fa3
; RV64-NEXT: fmin.d fa4, fa4, fa5
+; RV64-NEXT: fcvt.lu.d a2, fa4, rtz
+; RV64-NEXT: fld fa4, 48(sp)
+; RV64-NEXT: vslide1down.vx v8, v8, a0
+; RV64-NEXT: vslide1down.vx v8, v8, a3
+; RV64-NEXT: vslide1down.vx v8, v8, a4
+; RV64-NEXT: fmax.d fa4, fa4, fa3
+; RV64-NEXT: fmin.d fa4, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa4, rtz
; RV64-NEXT: fld fa4, 56(sp)
-; RV64-NEXT: vmv.v.x v9, a3
-; RV64-NEXT: vslide1down.vx v9, v9, a2
+; RV64-NEXT: vmv.v.x v9, a2
+; RV64-NEXT: vslide1down.vx v9, v9, a5
; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: fmax.d fa4, fa4, fa3
; RV64-NEXT: fmin.d fa5, fa4, fa5
; RV64-NEXT: fcvt.lu.d a0, fa5, rtz
-; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: vmv.v.i v0, 15
+; RV64-NEXT: vslide1down.vx v9, v9, a0
; RV64-NEXT: vslidedown.vi v9, v8, 4, v0.t
; RV64-NEXT: vse8.v v9, (a1)
; RV64-NEXT: addi sp, s0, -128
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll
index 48cc3f17a626..f195eeadf027 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fpext-vp.ll
@@ -96,8 +96,8 @@ declare <32 x double> @llvm.vp.fpext.v32f64.v32f32(<32 x float>, <32 x i1>, i32)
define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vfpext_v32f32_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB7_2
@@ -112,8 +112,8 @@ define <32 x double> @vfpext_v32f32_v32f64(<32 x float> %a, <32 x i1> %m, i32 ze
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 16
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
index 49a1b19b58a2..a4050b716e78 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptosi-vp.ll
@@ -394,8 +394,8 @@ declare <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x i64> @vfptosi_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_v32i64_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB25_2
@@ -408,8 +408,8 @@ define <32 x i64> @vfptosi_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 ze
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.fptosi.v32i64.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
index d44efa2f6133..b652cdd88c7c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptoui-vp.ll
@@ -394,8 +394,8 @@ declare <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x i64> @vfptoui_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_v32i64_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB25_2
@@ -408,8 +408,8 @@ define <32 x i64> @vfptoui_v32i64_v32f64(<32 x double> %va, <32 x i1> %m, i32 ze
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.fptoui.v32i64.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
index d890bf5412f9..920eed322363 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fptrunc-vp.ll
@@ -98,8 +98,8 @@ define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32
; CHECK-LABEL: vfptrunc_v32f32_v32f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v12, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB7_2
@@ -112,8 +112,8 @@ define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v24, v16, v0.t
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 53de1a875535..e81f686a2830 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -277,14 +277,14 @@ define void @insert_v8i32_v2i32_0(ptr %vp, ptr %svp) {
define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v8i32_v2i32_2:
; VLA: # %bb.0:
-; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; VLA-NEXT: vle32.v v8, (a1)
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; VLA-NEXT: vle32.v v10, (a0)
+; VLA-NEXT: vle32.v v8, (a0)
+; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; VLA-NEXT: vle32.v v10, (a1)
; VLA-NEXT: vsetivli zero, 4, e32, m2, tu, ma
-; VLA-NEXT: vslideup.vi v10, v8, 2
+; VLA-NEXT: vslideup.vi v8, v10, 2
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; VLA-NEXT: vse32.v v10, (a0)
+; VLA-NEXT: vse32.v v8, (a0)
; VLA-NEXT: ret
;
; VLS-LABEL: insert_v8i32_v2i32_2:
@@ -306,12 +306,13 @@ define void @insert_v8i32_v2i32_2(ptr %vp, ptr %svp) {
define void @insert_v8i32_v2i32_6(ptr %vp, ptr %svp) {
; VLA-LABEL: insert_v8i32_v2i32_6:
; VLA: # %bb.0:
+; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; VLA-NEXT: vle32.v v8, (a0)
; VLA-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; VLA-NEXT: vle32.v v8, (a1)
+; VLA-NEXT: vle32.v v10, (a1)
; VLA-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; VLA-NEXT: vle32.v v10, (a0)
-; VLA-NEXT: vslideup.vi v10, v8, 6
-; VLA-NEXT: vse32.v v10, (a0)
+; VLA-NEXT: vslideup.vi v8, v10, 6
+; VLA-NEXT: vse32.v v8, (a0)
; VLA-NEXT: ret
;
; VLS-LABEL: insert_v8i32_v2i32_6:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
index 4954827876c1..776a1e9bab6b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert.ll
@@ -533,11 +533,11 @@ define void @insertelt_c6_v8i64_0_add(ptr %x, ptr %y) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: li a2, 6
+; CHECK-NEXT: vle64.v v12, (a1)
+; CHECK-NEXT: li a1, 6
; CHECK-NEXT: vsetvli zero, zero, e64, m4, tu, ma
-; CHECK-NEXT: vmv.s.x v8, a2
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT: vle64.v v12, (a1)
; CHECK-NEXT: vadd.vv v8, v8, v12
; CHECK-NEXT: vse64.v v8, (a0)
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
index 4f4f0a09de74..4a5d37b2a85a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ -669,13 +669,14 @@ define void @buildvec_seq_v9i8(ptr %x) {
; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 3
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmv.v.i v9, 3
; CHECK-NEXT: li a1, 146
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v8, 2, v0
+; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v9, 2, v0
; CHECK-NEXT: vsetivli zero, 9, e8, m1, ta, ma
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
@@ -1214,11 +1215,11 @@ define <16 x i8> @buildvec_v16i8_loads_contigous(ptr %p) {
; CHECK-NEXT: vslide1down.vx v8, v8, t4
; CHECK-NEXT: vslide1down.vx v8, v8, t5
; CHECK-NEXT: vslide1down.vx v8, v8, t6
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: li a0, 255
+; CHECK-NEXT: li a1, 255
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
; CHECK-NEXT: ret
%p2 = getelementptr i8, ptr %p, i32 1
@@ -1308,11 +1309,11 @@ define <16 x i8> @buildvec_v16i8_loads_gather(ptr %p) {
; CHECK-NEXT: vslide1down.vx v8, v8, t4
; CHECK-NEXT: vslide1down.vx v8, v8, t5
; CHECK-NEXT: vslide1down.vx v8, v8, t6
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: li a0, 255
+; CHECK-NEXT: li a1, 255
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
; CHECK-NEXT: ret
%p2 = getelementptr i8, ptr %p, i32 1
@@ -1488,11 +1489,11 @@ define <16 x i8> @buildvec_v16i8_undef_edges(ptr %p) {
; CHECK-NEXT: vslide1down.vx v8, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a7
; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: vslidedown.vi v8, v8, 4
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslidedown.vi v8, v8, 4
; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
; CHECK-NEXT: ret
%p4 = getelementptr i8, ptr %p, i32 31
@@ -1553,11 +1554,11 @@ define <16 x i8> @buildvec_v16i8_loads_undef_scattered(ptr %p) {
; CHECK-NEXT: vslide1down.vx v8, v8, a7
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vslide1down.vx v8, v8, t0
-; CHECK-NEXT: vslide1down.vx v8, v8, a0
-; CHECK-NEXT: li a0, 255
+; CHECK-NEXT: li a1, 255
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vslide1down.vx v8, v8, a0
; CHECK-NEXT: vslidedown.vi v8, v10, 8, v0.t
; CHECK-NEXT: ret
%p2 = getelementptr i8, ptr %p, i32 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
index 4509642fdef1..e0c676788dcc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-explodevector.ll
@@ -828,112 +828,104 @@ define i64 @explode_8xi64(<8 x i64> %v) {
define i64 @explode_16xi64(<16 x i64> %v) {
; RV32-LABEL: explode_16xi64:
; RV32: # %bb.0:
-; RV32-NEXT: addi sp, sp, -64
-; RV32-NEXT: .cfi_def_cfa_offset 64
-; RV32-NEXT: sw ra, 60(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s0, 56(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s1, 52(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s2, 48(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s3, 44(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s4, 40(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s5, 36(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s6, 32(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s7, 28(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s8, 24(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s9, 20(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s10, 16(sp) # 4-byte Folded Spill
-; RV32-NEXT: sw s11, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
-; RV32-NEXT: .cfi_offset s0, -8
-; RV32-NEXT: .cfi_offset s1, -12
-; RV32-NEXT: .cfi_offset s2, -16
-; RV32-NEXT: .cfi_offset s3, -20
-; RV32-NEXT: .cfi_offset s4, -24
-; RV32-NEXT: .cfi_offset s5, -28
-; RV32-NEXT: .cfi_offset s6, -32
-; RV32-NEXT: .cfi_offset s7, -36
-; RV32-NEXT: .cfi_offset s8, -40
-; RV32-NEXT: .cfi_offset s9, -44
-; RV32-NEXT: .cfi_offset s10, -48
-; RV32-NEXT: .cfi_offset s11, -52
+; RV32-NEXT: addi sp, sp, -48
+; RV32-NEXT: .cfi_def_cfa_offset 48
+; RV32-NEXT: sw s0, 44(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s1, 40(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s2, 36(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s3, 32(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s4, 28(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s5, 24(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s6, 20(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s7, 16(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s8, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s9, 8(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s10, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT: sw s11, 0(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset s0, -4
+; RV32-NEXT: .cfi_offset s1, -8
+; RV32-NEXT: .cfi_offset s2, -12
+; RV32-NEXT: .cfi_offset s3, -16
+; RV32-NEXT: .cfi_offset s4, -20
+; RV32-NEXT: .cfi_offset s5, -24
+; RV32-NEXT: .cfi_offset s6, -28
+; RV32-NEXT: .cfi_offset s7, -32
+; RV32-NEXT: .cfi_offset s8, -36
+; RV32-NEXT: .cfi_offset s9, -40
+; RV32-NEXT: .cfi_offset s10, -44
+; RV32-NEXT: .cfi_offset s11, -48
; RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma
; RV32-NEXT: vslidedown.vi v16, v8, 2
-; RV32-NEXT: li a3, 32
-; RV32-NEXT: vsrl.vx v24, v16, a3
-; RV32-NEXT: vmv.x.s a0, v24
-; RV32-NEXT: vmv.x.s a1, v16
-; RV32-NEXT: sw a1, 8(sp) # 4-byte Folded Spill
-; RV32-NEXT: vslidedown.vi v16, v8, 3
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: li a0, 32
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s a1, v24
-; RV32-NEXT: sw a1, 4(sp) # 4-byte Folded Spill
+; RV32-NEXT: vmv.x.s a2, v16
+; RV32-NEXT: vslidedown.vi v16, v8, 3
+; RV32-NEXT: vsrl.vx v24, v16, a0
+; RV32-NEXT: vmv.x.s a3, v24
; RV32-NEXT: vmv.x.s a4, v16
; RV32-NEXT: vslidedown.vi v16, v8, 4
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s a5, v24
; RV32-NEXT: vmv.x.s a6, v16
; RV32-NEXT: vslidedown.vi v16, v8, 5
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s a7, v24
; RV32-NEXT: vmv.x.s t0, v16
; RV32-NEXT: vslidedown.vi v16, v8, 6
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s t1, v24
; RV32-NEXT: vmv.x.s t2, v16
; RV32-NEXT: vslidedown.vi v16, v8, 7
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s t3, v24
; RV32-NEXT: vmv.x.s t4, v16
; RV32-NEXT: vslidedown.vi v16, v8, 8
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s t5, v24
; RV32-NEXT: vmv.x.s t6, v16
; RV32-NEXT: vslidedown.vi v16, v8, 9
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s s0, v24
; RV32-NEXT: vmv.x.s s1, v16
; RV32-NEXT: vslidedown.vi v16, v8, 10
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s s2, v24
; RV32-NEXT: vmv.x.s s3, v16
; RV32-NEXT: vslidedown.vi v16, v8, 11
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s s4, v24
; RV32-NEXT: vmv.x.s s5, v16
; RV32-NEXT: vslidedown.vi v16, v8, 12
-; RV32-NEXT: vsrl.vx v24, v16, a3
+; RV32-NEXT: vsrl.vx v24, v16, a0
; RV32-NEXT: vmv.x.s s6, v24
; RV32-NEXT: vmv.x.s s7, v16
; RV32-NEXT: vslidedown.vi v16, v8, 13
-; RV32-NEXT: vsrl.vx v24, v16, a3
-; RV32-NEXT: vmv.x.s s8, v24
-; RV32-NEXT: vmv.x.s s9, v16
+; RV32-NEXT: vsrl.vx v24, v16, a0
+; RV32-NEXT: vmv.x.s s9, v24
+; RV32-NEXT: vmv.x.s s8, v16
; RV32-NEXT: vslidedown.vi v16, v8, 14
-; RV32-NEXT: vsrl.vx v24, v16, a3
-; RV32-NEXT: vmv.x.s s10, v24
-; RV32-NEXT: vmv.x.s s11, v16
-; RV32-NEXT: vslidedown.vi v16, v8, 15
-; RV32-NEXT: vsrl.vx v24, v16, a3
-; RV32-NEXT: vmv.x.s ra, v24
-; RV32-NEXT: vmv.s.x v9, zero
-; RV32-NEXT: vmv.x.s a2, v16
+; RV32-NEXT: vsrl.vx v24, v16, a0
+; RV32-NEXT: vmv.s.x v17, zero
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV32-NEXT: vredxor.vs v8, v8, v9
+; RV32-NEXT: vredxor.vs v17, v8, v17
+; RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 15
; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; RV32-NEXT: vsrl.vx v9, v8, a3
-; RV32-NEXT: vmv.x.s a3, v9
-; RV32-NEXT: add a3, a3, a0
-; RV32-NEXT: vmv.x.s a1, v8
-; RV32-NEXT: lw a0, 8(sp) # 4-byte Folded Reload
-; RV32-NEXT: add a0, a1, a0
-; RV32-NEXT: sltu a1, a0, a1
-; RV32-NEXT: add a1, a3, a1
-; RV32-NEXT: lw a3, 4(sp) # 4-byte Folded Reload
-; RV32-NEXT: add a1, a1, a3
-; RV32-NEXT: add a4, a0, a4
-; RV32-NEXT: sltu a0, a4, a0
-; RV32-NEXT: add a0, a0, a5
+; RV32-NEXT: vsrl.vx v18, v17, a0
+; RV32-NEXT: vmv.x.s s10, v18
+; RV32-NEXT: vmv.x.s s11, v17
+; RV32-NEXT: vsetivli zero, 1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vx v0, v8, a0
+; RV32-NEXT: add a1, s10, a1
+; RV32-NEXT: add a2, s11, a2
+; RV32-NEXT: sltu a0, a2, s11
; RV32-NEXT: add a0, a1, a0
+; RV32-NEXT: add a0, a0, a3
+; RV32-NEXT: add a4, a2, a4
+; RV32-NEXT: sltu a1, a4, a2
+; RV32-NEXT: add a1, a1, a5
+; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add a6, a4, a6
; RV32-NEXT: sltu a1, a6, a4
; RV32-NEXT: add a1, a1, a7
@@ -968,33 +960,36 @@ define i64 @explode_16xi64(<16 x i64> %v) {
; RV32-NEXT: add a0, a0, a1
; RV32-NEXT: add s7, s5, s7
; RV32-NEXT: sltu a1, s7, s5
-; RV32-NEXT: add a1, a1, s8
+; RV32-NEXT: add a1, a1, s9
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add s9, s7, s9
-; RV32-NEXT: sltu a1, s9, s7
-; RV32-NEXT: add a1, a1, s10
+; RV32-NEXT: vmv.x.s a1, v24
+; RV32-NEXT: add s8, s7, s8
+; RV32-NEXT: sltu a2, s8, s7
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: vmv.x.s a2, v16
; RV32-NEXT: add a0, a0, a1
-; RV32-NEXT: add s11, s9, s11
-; RV32-NEXT: sltu a1, s11, s9
-; RV32-NEXT: add a1, a1, ra
+; RV32-NEXT: vmv.x.s a1, v0
+; RV32-NEXT: add a2, s8, a2
+; RV32-NEXT: sltu a3, a2, s8
+; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, a0, a1
-; RV32-NEXT: add a0, s11, a2
-; RV32-NEXT: sltu a2, a0, s11
+; RV32-NEXT: vmv.x.s a0, v8
+; RV32-NEXT: add a0, a2, a0
+; RV32-NEXT: sltu a2, a0, a2
; RV32-NEXT: add a1, a1, a2
-; RV32-NEXT: lw ra, 60(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s0, 56(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s1, 52(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s2, 48(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s3, 44(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s4, 40(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s5, 36(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s6, 32(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s7, 28(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s8, 24(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s9, 20(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s10, 16(sp) # 4-byte Folded Reload
-; RV32-NEXT: lw s11, 12(sp) # 4-byte Folded Reload
-; RV32-NEXT: addi sp, sp, 64
+; RV32-NEXT: lw s0, 44(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s1, 40(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s2, 36(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s3, 32(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s4, 28(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s5, 24(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s6, 20(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s7, 16(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s8, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s9, 8(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s10, 4(sp) # 4-byte Folded Reload
+; RV32-NEXT: lw s11, 0(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: explode_16xi64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index 40ff8b50d99d..2ea90203b210 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -69,9 +69,9 @@ define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
; RV32-V512-NEXT: vsetivli zero, 4, e16, mf4, ta, ma
; RV32-V512-NEXT: vid.v v10
; RV32-V512-NEXT: vsrl.vi v11, v10, 1
+; RV32-V512-NEXT: vmv.v.i v0, 10
; RV32-V512-NEXT: vsetvli zero, zero, e64, m1, ta, mu
; RV32-V512-NEXT: vrgatherei16.vv v10, v8, v11
-; RV32-V512-NEXT: vmv.v.i v0, 10
; RV32-V512-NEXT: vrgatherei16.vv v10, v9, v11, v0.t
; RV32-V512-NEXT: vmv.v.v v8, v10
; RV32-V512-NEXT: ret
@@ -81,8 +81,8 @@ define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
; RV64-V512-NEXT: vsetivli zero, 4, e64, m1, ta, mu
; RV64-V512-NEXT: vid.v v10
; RV64-V512-NEXT: vsrl.vi v11, v10, 1
-; RV64-V512-NEXT: vrgather.vv v10, v8, v11
; RV64-V512-NEXT: vmv.v.i v0, 10
+; RV64-V512-NEXT: vrgather.vv v10, v8, v11
; RV64-V512-NEXT: vrgather.vv v10, v9, v11, v0.t
; RV64-V512-NEXT: vmv.v.v v8, v10
; RV64-V512-NEXT: ret
@@ -195,8 +195,8 @@ define <4 x i32> @interleave_v4i32_offset_1(<4 x i32> %x, <4 x i32> %y) {
; V128-NEXT: vsetivli zero, 4, e32, m1, ta, mu
; V128-NEXT: vid.v v8
; V128-NEXT: vsrl.vi v8, v8, 1
-; V128-NEXT: vadd.vi v8, v8, 1
; V128-NEXT: vmv.v.i v0, 10
+; V128-NEXT: vadd.vi v8, v8, 1
; V128-NEXT: vrgather.vv v10, v9, v8, v0.t
; V128-NEXT: vmv.v.v v8, v10
; V128-NEXT: ret
@@ -210,8 +210,8 @@ define <4 x i32> @interleave_v4i32_offset_1(<4 x i32> %x, <4 x i32> %y) {
; V512-NEXT: vsetivli zero, 4, e32, mf2, ta, mu
; V512-NEXT: vid.v v8
; V512-NEXT: vsrl.vi v8, v8, 1
-; V512-NEXT: vadd.vi v8, v8, 1
; V512-NEXT: vmv.v.i v0, 10
+; V512-NEXT: vadd.vi v8, v8, 1
; V512-NEXT: vrgather.vv v10, v9, v8, v0.t
; V512-NEXT: vmv1r.v v8, v10
; V512-NEXT: ret
@@ -426,13 +426,13 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
; V128-NEXT: vwmaccu.vx v8, a0, v16
; V128-NEXT: lui a1, 699051
; V128-NEXT: addi a1, a1, -1366
-; V128-NEXT: li a2, 32
; V128-NEXT: vmv.s.x v0, a1
-; V128-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; V128-NEXT: li a1, 32
+; V128-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; V128-NEXT: vmerge.vvm v24, v8, v24, v0
-; V128-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT: addi a1, sp, 16
; V128-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; V128-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; V128-NEXT: vwaddu.vv v0, v16, v8
; V128-NEXT: vwmaccu.vx v0, a0, v8
; V128-NEXT: vmv8r.v v8, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index aba69dc84620..32782f1c6045 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -5,9 +5,8 @@
define <4 x i16> @shuffle_v4i16(<4 x i16> %x, <4 x i16> %y) {
; CHECK-LABEL: shuffle_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 11
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 11
; CHECK-NEXT: vmerge.vvm v8, v9, v8, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> %x, <4 x i16> %y, <4 x i32> <i32 0, i32 1, i32 6, i32 3>
@@ -29,9 +28,8 @@ define <8 x i32> @shuffle_v8i32(<8 x i32> %x, <8 x i32> %y) {
define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) {
; CHECK-LABEL: shuffle_xv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 9
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 9
; CHECK-NEXT: vmerge.vim v8, v8, 5, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i16> %x, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -41,9 +39,8 @@ define <4 x i16> @shuffle_xv_v4i16(<4 x i16> %x) {
define <4 x i16> @shuffle_vx_v4i16(<4 x i16> %x) {
; CHECK-LABEL: shuffle_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 6
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 6
; CHECK-NEXT: vmerge.vim v8, v8, 5, v0
; CHECK-NEXT: ret
%s = shufflevector <4 x i16> %x, <4 x i16> <i16 5, i16 5, i16 5, i16 5>, <4 x i32> <i32 0, i32 5, i32 6, i32 3>
@@ -89,8 +86,8 @@ define <4 x i16> @vrgather_shuffle_vv_v4i16(<4 x i16> %x, <4 x i16> %y) {
; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
; CHECK-NEXT: vle16.v v11, (a0)
-; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: vmv.v.i v0, 8
+; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: vrgather.vi v10, v9, 1, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -162,22 +159,21 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vmv.v.i v16, 2
-; RV32-NEXT: lui a0, %hi(.LCPI11_0)
-; RV32-NEXT: addi a0, a0, %lo(.LCPI11_0)
-; RV32-NEXT: vle16.v v20, (a0)
; RV32-NEXT: li a0, 5
+; RV32-NEXT: lui a1, %hi(.LCPI11_0)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI11_0)
+; RV32-NEXT: vle16.v v20, (a1)
; RV32-NEXT: vslide1down.vx v21, v16, a0
-; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
-; RV32-NEXT: vrgatherei16.vv v16, v8, v20
; RV32-NEXT: li a0, 164
; RV32-NEXT: vmv.s.x v0, a0
+; RV32-NEXT: vsetvli zero, zero, e64, m4, ta, mu
+; RV32-NEXT: vrgatherei16.vv v16, v8, v20
; RV32-NEXT: vrgatherei16.vv v16, v12, v21, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vrgather_shuffle_vv_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vmv4r.v v16, v8
; RV64-NEXT: lui a0, 327683
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: addi a0, a0, 1
@@ -186,7 +182,7 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vmv.v.x v20, a0
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vrgatherei16.vv v8, v16, v20
+; RV64-NEXT: vrgatherei16.vv v16, v8, v20
; RV64-NEXT: li a0, 164
; RV64-NEXT: vmv.s.x v0, a0
; RV64-NEXT: lui a0, 163841
@@ -194,9 +190,10 @@ define <8 x i64> @vrgather_shuffle_vv_v8i64(<8 x i64> %x, <8 x i64> %y) {
; RV64-NEXT: addi a0, a0, 1
; RV64-NEXT: slli a0, a0, 17
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vmv.v.x v16, a0
+; RV64-NEXT: vmv.v.x v8, a0
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT: vrgatherei16.vv v8, v12, v16, v0.t
+; RV64-NEXT: vrgatherei16.vv v16, v12, v8, v0.t
+; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
%s = shufflevector <8 x i64> %x, <8 x i64> %y, <8 x i32> <i32 1, i32 2, i32 10, i32 5, i32 1, i32 10, i32 3, i32 13>
ret <8 x i64> %s
@@ -210,13 +207,13 @@ define <8 x i64> @vrgather_shuffle_xv_v8i64(<8 x i64> %x) {
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
; RV32-NEXT: vle16.v v16, (a0)
; RV32-NEXT: vmv.v.i v20, -1
-; RV32-NEXT: vrgatherei16.vv v12, v20, v16
; RV32-NEXT: lui a0, %hi(.LCPI12_1)
; RV32-NEXT: addi a0, a0, %lo(.LCPI12_1)
-; RV32-NEXT: vle16.v v16, (a0)
+; RV32-NEXT: vle16.v v17, (a0)
; RV32-NEXT: li a0, 113
; RV32-NEXT: vmv.s.x v0, a0
-; RV32-NEXT: vrgatherei16.vv v12, v8, v16, v0.t
+; RV32-NEXT: vrgatherei16.vv v12, v20, v16
+; RV32-NEXT: vrgatherei16.vv v12, v8, v17, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
@@ -367,10 +364,10 @@ define <8 x i8> @splat_ve4_ins_i1ve3(<8 x i8> %v) {
define <8 x i8> @splat_ve2_we0(<8 x i8> %v, <8 x i8> %w) {
; CHECK-LABEL: splat_ve2_we0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vrgather.vi v10, v8, 2
; CHECK-NEXT: li a0, 66
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vrgather.vi v10, v8, 2
; CHECK-NEXT: vrgather.vi v10, v9, 0, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -386,10 +383,10 @@ define <8 x i8> @splat_ve2_we0_ins_i0ve4(<8 x i8> %v, <8 x i8> %w) {
; CHECK-NEXT: li a0, 4
; CHECK-NEXT: vsetvli zero, zero, e8, mf2, tu, ma
; CHECK-NEXT: vmv.s.x v11, a0
-; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
-; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: li a0, 66
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: vrgather.vi v10, v9, 0, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -402,10 +399,10 @@ define <8 x i8> @splat_ve2_we0_ins_i0we4(<8 x i8> %v, <8 x i8> %w) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vrgather.vi v10, v8, 2
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 4
; CHECK-NEXT: li a0, 67
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vmv.v.i v8, 4
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
@@ -421,10 +418,10 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4(<8 x i8> %v, <8 x i8> %w) {
; CHECK-NEXT: addi a0, a0, 514
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v11, a0
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: li a0, 66
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v10, v8, v11
; CHECK-NEXT: vrgather.vi v10, v9, 0, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -440,10 +437,10 @@ define <8 x i8> @splat_ve2_we0_ins_i2we4(<8 x i8> %v, <8 x i8> %w) {
; CHECK-NEXT: vmv.v.i v11, 0
; CHECK-NEXT: vsetivli zero, 3, e8, mf2, tu, ma
; CHECK-NEXT: vslideup.vi v11, v10, 2
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vrgather.vi v10, v8, 2
; CHECK-NEXT: li a0, 70
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vi v10, v8, 2
; CHECK-NEXT: vrgather.vv v10, v9, v11, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -463,10 +460,10 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4_i5we6(<8 x i8> %v, <8 x i8> %w) {
; CHECK-NEXT: addi a0, a0, 2
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vmv.v.x v12, a0
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: li a0, 98
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vrgather.vv v10, v8, v12
; CHECK-NEXT: vrgather.vv v10, v9, v11, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -666,8 +663,8 @@ define <8 x i8> @merge_start_into_start(<8 x i8> %v, <8 x i8> %w) {
define <8 x i8> @merge_slidedown(<8 x i8> %v, <8 x i8> %w) {
; CHECK-LABEL: merge_slidedown:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: li a0, 195
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -680,10 +677,10 @@ define <8 x i8> @merge_slidedown(<8 x i8> %v, <8 x i8> %w) {
define <8 x i8> @merge_non_contiguous_slideup_slidedown(<8 x i8> %v, <8 x i8> %w) {
; CHECK-LABEL: merge_non_contiguous_slideup_slidedown:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: li a0, -22
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vslideup.vi v8, v9, 1, v0.t
; CHECK-NEXT: ret
%res = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> <i32 2, i32 8, i32 4, i32 10, i32 6, i32 12, i32 13, i32 14>
@@ -694,13 +691,13 @@ define <8 x i8> @merge_non_contiguous_slideup_slidedown(<8 x i8> %v, <8 x i8> %w
define <8 x i8> @unmergable(<8 x i8> %v, <8 x i8> %w) {
; CHECK-LABEL: unmergable:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: lui a0, %hi(.LCPI46_0)
; CHECK-NEXT: addi a0, a0, %lo(.LCPI46_0)
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
; CHECK-NEXT: vle8.v v10, (a0)
; CHECK-NEXT: li a0, -22
; CHECK-NEXT: vmv.s.x v0, a0
+; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
%res = shufflevector <8 x i8> %v, <8 x i8> %w, <8 x i32> <i32 2, i32 9, i32 4, i32 11, i32 6, i32 13, i32 8, i32 15>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index 635869904832..79c36a629465 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -1100,46 +1100,46 @@ define void @mulhu_v16i8(ptr %x) {
; CHECK-LABEL: mulhu_v16i8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v9, (a0)
; CHECK-NEXT: lui a1, 3
; CHECK-NEXT: addi a1, a1, -2044
; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v9, 0
-; CHECK-NEXT: li a1, -128
-; CHECK-NEXT: vmerge.vxm v10, v9, a1, v0
+; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: lui a1, 1
; CHECK-NEXT: addi a2, a1, 32
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a2
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmv.s.x v8, a2
; CHECK-NEXT: lui a2, %hi(.LCPI65_0)
; CHECK-NEXT: addi a2, a2, %lo(.LCPI65_0)
; CHECK-NEXT: vle8.v v11, (a2)
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsrl.vv v9, v8, v9
-; CHECK-NEXT: vmulhu.vv v9, v9, v11
-; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: vmulhu.vv v8, v8, v10
-; CHECK-NEXT: vadd.vv v8, v8, v9
+; CHECK-NEXT: li a2, -128
+; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vmerge.vxm v12, v10, a2, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 1, v0
+; CHECK-NEXT: vsrl.vv v8, v9, v8
+; CHECK-NEXT: vmulhu.vv v8, v8, v11
+; CHECK-NEXT: vsub.vv v9, v9, v8
+; CHECK-NEXT: vmulhu.vv v9, v9, v12
+; CHECK-NEXT: vadd.vv v9, v9, v8
; CHECK-NEXT: li a2, 513
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a2
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v9, 4
-; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 4
+; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
; CHECK-NEXT: addi a1, a1, 78
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v9, v9, 3, v0
; CHECK-NEXT: lui a1, 8
; CHECK-NEXT: addi a1, a1, 304
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v9, v9, 2, v0
-; CHECK-NEXT: vsrl.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vim v10, v10, 3, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v10, 2, v0
+; CHECK-NEXT: vsrl.vv v8, v9, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <16 x i8>, ptr %x
@@ -1158,16 +1158,16 @@ define void @mulhu_v8i16(ptr %x) {
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vsetvli zero, zero, e16, m1, tu, ma
; CHECK-NEXT: vmv.s.x v10, a1
+; CHECK-NEXT: lui a1, %hi(.LCPI66_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI66_0)
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v11, 1
+; CHECK-NEXT: vle16.v v11, (a1)
+; CHECK-NEXT: vmv.v.i v12, 1
; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v9, v11, 6
+; CHECK-NEXT: vslideup.vi v9, v12, 6
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
-; CHECK-NEXT: lui a1, %hi(.LCPI66_0)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI66_0)
-; CHECK-NEXT: vle16.v v12, (a1)
; CHECK-NEXT: vsrl.vv v9, v8, v9
-; CHECK-NEXT: vmulhu.vv v9, v9, v12
+; CHECK-NEXT: vmulhu.vv v9, v9, v11
; CHECK-NEXT: vsub.vv v8, v8, v9
; CHECK-NEXT: vmulhu.vv v8, v8, v10
; CHECK-NEXT: vadd.vv v8, v8, v9
@@ -1176,7 +1176,7 @@ define void @mulhu_v8i16(ptr %x) {
; CHECK-NEXT: vmv.v.i v9, 3
; CHECK-NEXT: vmerge.vim v9, v9, 2, v0
; CHECK-NEXT: vsetivli zero, 7, e16, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v9, v11, 6
+; CHECK-NEXT: vslideup.vi v9, v12, 6
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9
; CHECK-NEXT: vse16.v v8, (a0)
@@ -1222,18 +1222,18 @@ define void @mulhu_v4i32(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: lui a1, 524288
-; CHECK-NEXT: vmv.s.x v9, a1
-; CHECK-NEXT: vmv.v.i v10, 0
-; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v9, 2
; CHECK-NEXT: lui a1, %hi(.LCPI68_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI68_0)
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v9, (a1)
+; CHECK-NEXT: lui a1, 524288
+; CHECK-NEXT: vmv.s.x v10, a1
+; CHECK-NEXT: vmv.v.i v11, 0
+; CHECK-NEXT: vsetivli zero, 3, e32, m1, tu, ma
+; CHECK-NEXT: vslideup.vi v11, v10, 2
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vmulhu.vv v9, v8, v9
; CHECK-NEXT: vsub.vv v8, v8, v9
-; CHECK-NEXT: vmulhu.vv v8, v8, v10
+; CHECK-NEXT: vmulhu.vv v8, v8, v11
; CHECK-NEXT: vadd.vv v8, v8, v9
; CHECK-NEXT: lui a1, 4128
; CHECK-NEXT: addi a1, a1, 514
@@ -1455,13 +1455,13 @@ define void @mulhs_v2i64(ptr %x) {
; RV64-LABEL: mulhs_v2i64:
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: lui a1, 349525
; RV64-NEXT: addiw a1, a1, 1365
-; RV64-NEXT: slli a2, a1, 32
-; RV64-NEXT: add a1, a1, a2
; RV64-NEXT: lui a2, %hi(.LCPI74_0)
; RV64-NEXT: ld a2, %lo(.LCPI74_0)(a2)
-; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: slli a3, a1, 32
+; RV64-NEXT: add a1, a1, a3
; RV64-NEXT: vmv.v.x v9, a1
; RV64-NEXT: vsetvli zero, zero, e64, m1, tu, ma
; RV64-NEXT: vmv.s.x v9, a2
@@ -3260,49 +3260,47 @@ define void @mulhu_v32i8(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.i v10, 0
+; CHECK-NEXT: vle8.v v10, (a0)
+; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: lui a1, 163907
; CHECK-NEXT: addi a1, a1, -2044
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: li a1, -128
-; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vxm v12, v10, a1, v0
; CHECK-NEXT: lui a1, 66049
; CHECK-NEXT: addi a1, a1, 32
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: lui a1, %hi(.LCPI181_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI181_0)
; CHECK-NEXT: vle8.v v14, (a1)
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vsrl.vv v10, v8, v10
-; CHECK-NEXT: vmulhu.vv v10, v10, v14
-; CHECK-NEXT: vsub.vv v8, v8, v10
-; CHECK-NEXT: vmulhu.vv v8, v8, v12
-; CHECK-NEXT: vadd.vv v8, v8, v10
-; CHECK-NEXT: vmv.v.i v10, 4
+; CHECK-NEXT: li a1, -128
+; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmerge.vxm v16, v12, a1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
+; CHECK-NEXT: vsrl.vv v8, v10, v8
+; CHECK-NEXT: vmulhu.vv v8, v8, v14
+; CHECK-NEXT: vsub.vv v10, v10, v8
+; CHECK-NEXT: vmulhu.vv v10, v10, v16
+; CHECK-NEXT: vadd.vv v10, v10, v8
; CHECK-NEXT: lui a1, 8208
; CHECK-NEXT: addi a1, a1, 513
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
+; CHECK-NEXT: vmv.v.i v8, 4
+; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
; CHECK-NEXT: lui a1, 66785
; CHECK-NEXT: addi a1, a1, 78
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v10, v10, 3, v0
; CHECK-NEXT: lui a1, 529160
; CHECK-NEXT: addi a1, a1, 304
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT: vmv.s.x v0, a1
+; CHECK-NEXT: vmv.s.x v8, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v10, v10, 2, v0
-; CHECK-NEXT: vsrl.vv v8, v8, v10
+; CHECK-NEXT: vmerge.vim v12, v12, 3, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v8, v12, 2, v0
+; CHECK-NEXT: vsrl.vv v8, v10, v8
; CHECK-NEXT: vse8.v v8, (a0)
; CHECK-NEXT: ret
%a = load <32 x i8>, ptr %x
@@ -3326,12 +3324,12 @@ define void @mulhu_v16i16(ptr %x) {
; RV32-NEXT: vmv.s.x v8, a1
; RV32-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV32-NEXT: vmv.v.i v9, 0
-; RV32-NEXT: vmv1r.v v0, v8
-; RV32-NEXT: vmerge.vim v9, v9, 1, v0
-; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV32-NEXT: lui a1, %hi(.LCPI182_0)
; RV32-NEXT: addi a1, a1, %lo(.LCPI182_0)
; RV32-NEXT: vle16.v v14, (a1)
+; RV32-NEXT: vmv1r.v v0, v8
+; RV32-NEXT: vmerge.vim v9, v9, 1, v0
+; RV32-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; RV32-NEXT: vsext.vf2 v16, v9
; RV32-NEXT: vsrl.vv v16, v10, v16
; RV32-NEXT: vmulhu.vv v14, v16, v14
@@ -3361,27 +3359,27 @@ define void @mulhu_v16i16(ptr %x) {
; RV64-NEXT: vmv.v.i v10, 0
; RV64-NEXT: lui a1, 1048568
; RV64-NEXT: vmerge.vxm v10, v10, a1, v0
+; RV64-NEXT: lui a1, %hi(.LCPI182_0)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI182_0)
+; RV64-NEXT: vle16.v v12, (a1)
; RV64-NEXT: li a1, 1
; RV64-NEXT: slli a1, a1, 48
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vmv.v.x v12, a1
+; RV64-NEXT: vmv.v.x v14, a1
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: lui a1, %hi(.LCPI182_0)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI182_0)
-; RV64-NEXT: vle16.v v14, (a1)
-; RV64-NEXT: vsext.vf2 v16, v12
-; RV64-NEXT: vsrl.vv v12, v8, v16
-; RV64-NEXT: vmulhu.vv v12, v12, v14
-; RV64-NEXT: vsub.vv v8, v8, v12
-; RV64-NEXT: vmulhu.vv v8, v8, v10
-; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: vsext.vf2 v16, v14
+; RV64-NEXT: vsrl.vv v14, v8, v16
+; RV64-NEXT: vmulhu.vv v12, v14, v12
; RV64-NEXT: lui a1, %hi(.LCPI182_1)
; RV64-NEXT: addi a1, a1, %lo(.LCPI182_1)
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
-; RV64-NEXT: vlse64.v v10, (a1), zero
+; RV64-NEXT: vlse64.v v14, (a1), zero
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vsext.vf2 v12, v10
-; RV64-NEXT: vsrl.vv v8, v8, v12
+; RV64-NEXT: vsub.vv v8, v8, v12
+; RV64-NEXT: vmulhu.vv v8, v8, v10
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: vsext.vf2 v10, v14
+; RV64-NEXT: vsrl.vv v8, v8, v10
; RV64-NEXT: vse16.v v8, (a0)
; RV64-NEXT: ret
%a = load <16 x i16>, ptr %x
@@ -3433,23 +3431,24 @@ define void @mulhu_v4i64(ptr %x) {
; RV32-NEXT: vle32.v v10, (a1)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vmulhu.vv v10, v8, v10
-; RV32-NEXT: vsub.vv v8, v8, v10
; RV32-NEXT: lui a1, 524288
; RV32-NEXT: vmv.s.x v12, a1
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vmv.v.i v14, 0
; RV32-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV32-NEXT: vslideup.vi v14, v12, 5
+; RV32-NEXT: lui a1, %hi(.LCPI184_1)
+; RV32-NEXT: addi a1, a1, %lo(.LCPI184_1)
+; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
+; RV32-NEXT: vle8.v v12, (a1)
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT: vsub.vv v8, v8, v10
; RV32-NEXT: vmulhu.vv v8, v8, v14
; RV32-NEXT: vadd.vv v8, v8, v10
-; RV32-NEXT: lui a1, %hi(.LCPI184_1)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI184_1)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vle8.v v10, (a1)
-; RV32-NEXT: vsext.vf4 v12, v10
+; RV32-NEXT: vsext.vf4 v10, v12
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV32-NEXT: vsrl.vv v8, v8, v12
+; RV32-NEXT: vsrl.vv v8, v8, v10
; RV32-NEXT: vse64.v v8, (a0)
; RV32-NEXT: ret
;
@@ -3457,19 +3456,19 @@ define void @mulhu_v4i64(ptr %x) {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
+; RV64-NEXT: lui a1, %hi(.LCPI184_0)
+; RV64-NEXT: addi a1, a1, %lo(.LCPI184_0)
+; RV64-NEXT: vle64.v v10, (a1)
; RV64-NEXT: li a1, -1
; RV64-NEXT: slli a1, a1, 63
-; RV64-NEXT: vmv.s.x v10, a1
-; RV64-NEXT: vmv.v.i v12, 0
+; RV64-NEXT: vmv.s.x v12, a1
+; RV64-NEXT: vmv.v.i v14, 0
; RV64-NEXT: vsetivli zero, 3, e64, m2, tu, ma
-; RV64-NEXT: vslideup.vi v12, v10, 2
-; RV64-NEXT: lui a1, %hi(.LCPI184_0)
-; RV64-NEXT: addi a1, a1, %lo(.LCPI184_0)
+; RV64-NEXT: vslideup.vi v14, v12, 2
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vle64.v v10, (a1)
; RV64-NEXT: vmulhu.vv v10, v8, v10
; RV64-NEXT: vsub.vv v8, v8, v10
-; RV64-NEXT: vmulhu.vv v8, v8, v12
+; RV64-NEXT: vmulhu.vv v8, v8, v14
; RV64-NEXT: vadd.vv v8, v8, v10
; RV64-NEXT: lui a1, 12320
; RV64-NEXT: addi a1, a1, 513
@@ -3488,14 +3487,13 @@ define void @mulhs_v32i8(ptr %x) {
; CHECK-LABEL: mulhs_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0)
-; CHECK-NEXT: vmv.v.i v10, 7
; CHECK-NEXT: lui a1, 304453
; CHECK-NEXT: addi a1, a1, -1452
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
+; CHECK-NEXT: vmv.v.i v10, 7
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
; CHECK-NEXT: li a1, -123
; CHECK-NEXT: vmv.v.x v12, a1
@@ -3615,19 +3613,19 @@ define void @mulhs_v4i64(ptr %x) {
;
; RV64-LABEL: mulhs_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: lui a1, 349525
; RV64-NEXT: addiw a1, a1, 1365
; RV64-NEXT: slli a2, a1, 32
; RV64-NEXT: add a1, a1, a2
-; RV64-NEXT: vmv.v.x v10, a1
-; RV64-NEXT: lui a1, %hi(.LCPI188_0)
-; RV64-NEXT: ld a1, %lo(.LCPI188_0)(a1)
+; RV64-NEXT: lui a2, %hi(.LCPI188_0)
+; RV64-NEXT: ld a2, %lo(.LCPI188_0)(a2)
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vle64.v v8, (a0)
; RV64-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV64-NEXT: vmv.v.i v0, 5
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64-NEXT: vmerge.vxm v10, v10, a1, v0
+; RV64-NEXT: vmv.v.x v10, a1
+; RV64-NEXT: vmerge.vxm v10, v10, a2, v0
; RV64-NEXT: vmulh.vv v10, v8, v10
; RV64-NEXT: lui a1, 1044496
; RV64-NEXT: addi a1, a1, -256
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll
index eb95d86e3404..82e0760d593c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access-zve32x.ll
@@ -7,57 +7,63 @@
define <4 x i1> @load_large_vector(ptr %p) {
; ZVE32X-LABEL: load_large_vector:
; ZVE32X: # %bb.0:
-; ZVE32X-NEXT: ld a1, 56(a0)
-; ZVE32X-NEXT: ld a2, 32(a0)
-; ZVE32X-NEXT: ld a3, 24(a0)
-; ZVE32X-NEXT: ld a4, 48(a0)
-; ZVE32X-NEXT: ld a5, 8(a0)
-; ZVE32X-NEXT: ld a6, 0(a0)
-; ZVE32X-NEXT: xor a2, a3, a2
-; ZVE32X-NEXT: snez a2, a2
+; ZVE32X-NEXT: ld a1, 80(a0)
+; ZVE32X-NEXT: ld a2, 72(a0)
+; ZVE32X-NEXT: ld a3, 56(a0)
+; ZVE32X-NEXT: ld a4, 32(a0)
+; ZVE32X-NEXT: ld a5, 24(a0)
+; ZVE32X-NEXT: ld a6, 48(a0)
+; ZVE32X-NEXT: ld a7, 8(a0)
+; ZVE32X-NEXT: ld a0, 0(a0)
+; ZVE32X-NEXT: xor a4, a5, a4
+; ZVE32X-NEXT: snez a4, a4
; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmv.s.x v8, a2
+; ZVE32X-NEXT: vmv.s.x v8, a4
; ZVE32X-NEXT: vand.vi v8, v8, 1
; ZVE32X-NEXT: vmsne.vi v0, v8, 0
-; ZVE32X-NEXT: vmv.s.x v8, zero
-; ZVE32X-NEXT: vmerge.vim v9, v8, 1, v0
-; ZVE32X-NEXT: xor a2, a6, a5
-; ZVE32X-NEXT: snez a2, a2
-; ZVE32X-NEXT: vmv.s.x v10, a2
+; ZVE32X-NEXT: vmv.s.x v9, zero
+; ZVE32X-NEXT: vmerge.vim v8, v9, 1, v0
+; ZVE32X-NEXT: xor a0, a0, a7
+; ZVE32X-NEXT: snez a0, a0
+; ZVE32X-NEXT: vmv.s.x v10, a0
; ZVE32X-NEXT: vand.vi v10, v10, 1
; ZVE32X-NEXT: vmsne.vi v0, v10, 0
; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; ZVE32X-NEXT: vmv.v.i v10, 0
; ZVE32X-NEXT: vmerge.vim v11, v10, 1, v0
; ZVE32X-NEXT: vsetivli zero, 2, e8, mf4, tu, ma
-; ZVE32X-NEXT: vslideup.vi v11, v9, 1
+; ZVE32X-NEXT: vslideup.vi v11, v8, 1
; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; ZVE32X-NEXT: vmsne.vi v0, v11, 0
-; ZVE32X-NEXT: ld a2, 80(a0)
-; ZVE32X-NEXT: vmerge.vim v9, v10, 1, v0
-; ZVE32X-NEXT: xor a1, a4, a1
-; ZVE32X-NEXT: snez a1, a1
-; ZVE32X-NEXT: vmv.s.x v11, a1
+; ZVE32X-NEXT: xor a0, a6, a3
+; ZVE32X-NEXT: snez a0, a0
+; ZVE32X-NEXT: vmv.s.x v8, a0
; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; ZVE32X-NEXT: vand.vi v11, v11, 1
-; ZVE32X-NEXT: vmsne.vi v0, v11, 0
-; ZVE32X-NEXT: ld a0, 72(a0)
-; ZVE32X-NEXT: vmerge.vim v11, v8, 1, v0
+; ZVE32X-NEXT: vand.vi v8, v8, 1
+; ZVE32X-NEXT: vmsne.vi v8, v8, 0
+; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVE32X-NEXT: vmerge.vim v11, v10, 1, v0
+; ZVE32X-NEXT: vmv1r.v v0, v8
+; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; ZVE32X-NEXT: vmerge.vim v8, v9, 1, v0
; ZVE32X-NEXT: vsetivli zero, 3, e8, mf4, tu, ma
-; ZVE32X-NEXT: vslideup.vi v9, v11, 2
+; ZVE32X-NEXT: vslideup.vi v11, v8, 2
; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; ZVE32X-NEXT: vmsne.vi v0, v9, 0
-; ZVE32X-NEXT: vmerge.vim v9, v10, 1, v0
-; ZVE32X-NEXT: xor a0, a0, a2
-; ZVE32X-NEXT: snez a0, a0
-; ZVE32X-NEXT: vmv.s.x v10, a0
+; ZVE32X-NEXT: vmsne.vi v0, v11, 0
+; ZVE32X-NEXT: xor a1, a2, a1
+; ZVE32X-NEXT: snez a0, a1
+; ZVE32X-NEXT: vmv.s.x v8, a0
; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; ZVE32X-NEXT: vand.vi v10, v10, 1
-; ZVE32X-NEXT: vmsne.vi v0, v10, 0
-; ZVE32X-NEXT: vmerge.vim v8, v8, 1, v0
+; ZVE32X-NEXT: vand.vi v8, v8, 1
+; ZVE32X-NEXT: vmsne.vi v8, v8, 0
+; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
+; ZVE32X-NEXT: vmerge.vim v10, v10, 1, v0
+; ZVE32X-NEXT: vmv1r.v v0, v8
+; ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; ZVE32X-NEXT: vmerge.vim v8, v9, 1, v0
; ZVE32X-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; ZVE32X-NEXT: vslideup.vi v9, v8, 3
-; ZVE32X-NEXT: vmsne.vi v0, v9, 0
+; ZVE32X-NEXT: vslideup.vi v10, v8, 3
+; ZVE32X-NEXT: vmsne.vi v0, v10, 0
; ZVE32X-NEXT: ret
;
; ZVE64X-LABEL: load_large_vector:
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
index 99364264de82..178a920169ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleaved-access.ll
@@ -159,16 +159,16 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: .cfi_def_cfa_offset 16
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 54
+; RV32-NEXT: li a3, 82
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: sub sp, sp, a2
-; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x36, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 54 * vlenb
+; RV32-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xd2, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 82 * vlenb
; RV32-NEXT: addi a3, a1, 256
; RV32-NEXT: li a2, 32
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vle32.v v16, (a3)
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 21
+; RV32-NEXT: li a4, 57
; RV32-NEXT: mul a3, a3, a4
; RV32-NEXT: add a3, sp, a3
; RV32-NEXT: addi a3, a3, 16
@@ -177,30 +177,27 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; RV32-NEXT: vslideup.vi v8, v16, 4
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a5, a4, 3
-; RV32-NEXT: add a4, a5, a4
+; RV32-NEXT: li a5, 41
+; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
; RV32-NEXT: lui a4, 12
-; RV32-NEXT: vmv.s.x v0, a4
-; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: add a4, sp, a4
-; RV32-NEXT: addi a4, a4, 16
-; RV32-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vmv.s.x v1, a4
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v16, v16, 16
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 37
-; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: slli a5, a4, 6
+; RV32-NEXT: add a4, a5, a4
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT: vslideup.vi v8, v16, 10, v0.t
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: slli a5, a4, 4
-; RV32-NEXT: add a4, a5, a4
+; RV32-NEXT: li a5, 45
+; RV32-NEXT: mul a4, a4, a5
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
@@ -209,391 +206,429 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
; RV32-NEXT: vle16.v v8, (a4)
; RV32-NEXT: csrr a4, vlenb
-; RV32-NEXT: li a5, 13
-; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: slli a5, a4, 5
+; RV32-NEXT: add a4, a5, a4
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: lui a4, %hi(.LCPI6_1)
+; RV32-NEXT: addi a4, a4, %lo(.LCPI6_1)
+; RV32-NEXT: lui a5, 1
+; RV32-NEXT: vle16.v v8, (a4)
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a6, 25
+; RV32-NEXT: mul a4, a4, a6
; RV32-NEXT: add a4, sp, a4
; RV32-NEXT: addi a4, a4, 16
; RV32-NEXT: vs4r.v v8, (a4) # Unknown-size Folded Spill
-; RV32-NEXT: vle32.v v24, (a1)
+; RV32-NEXT: vle32.v v8, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a4, 45
+; RV32-NEXT: li a4, 73
; RV32-NEXT: mul a1, a1, a4
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: lui a1, %hi(.LCPI6_1)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_1)
-; RV32-NEXT: lui a4, 1
-; RV32-NEXT: addi a4, a4, -64
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vle32.v v24, (a3)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a5, a1, 2
-; RV32-NEXT: add a1, a5, a1
+; RV32-NEXT: li a3, 49
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vle32.v v16, (a3)
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: addi a1, a5, -64
+; RV32-NEXT: vmv.s.x v0, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 29
+; RV32-NEXT: li a3, 37
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vmv.s.x v2, a4
+; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a3, a1, 5
+; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v4
-; RV32-NEXT: vmv1r.v v0, v2
+; RV32-NEXT: vrgatherei16.vv v16, v8, v4
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 2
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 25
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
-; RV32-NEXT: vsetivli zero, 12, e32, m4, tu, ma
+; RV32-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v16, v24, v8, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 4
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 45
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vmv.v.v v12, v8
+; RV32-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 12, e32, m4, tu, ma
+; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 4
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 45
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
+; RV32-NEXT: li a3, 57
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vmv4r.v v16, v8
-; RV32-NEXT: vslideup.vi v8, v16, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vslideup.vi v12, v8, 2
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 21
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl1r.v v3, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vmv1r.v v0, v3
+; RV32-NEXT: vs1r.v v1, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a3, a1, 6
+; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslideup.vi v8, v16, 8, v0.t
-; RV32-NEXT: vmv.v.v v20, v8
+; RV32-NEXT: vslideup.vi v12, v16, 8, v0.t
+; RV32-NEXT: vmv.v.v v20, v12
; RV32-NEXT: lui a1, %hi(.LCPI6_2)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_2)
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: lui a3, %hi(.LCPI6_3)
+; RV32-NEXT: addi a3, a3, %lo(.LCPI6_3)
+; RV32-NEXT: lui a4, %hi(.LCPI6_4)
+; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV32-NEXT: vle16.v v4, (a1)
+; RV32-NEXT: vle16.v v16, (a3)
+; RV32-NEXT: addi a1, a4, %lo(.LCPI6_4)
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v2, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
+; RV32-NEXT: li a3, 73
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: lui a1, %hi(.LCPI6_3)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_3)
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; RV32-NEXT: vrgatherei16.vv v24, v8, v4
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 2
-; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: li a3, 37
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 45
+; RV32-NEXT: li a3, 49
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsetivli zero, 12, e32, m4, tu, ma
+; RV32-NEXT: vmv.v.v v20, v24
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
+; RV32-NEXT: li a3, 37
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v4
-; RV32-NEXT: vmv1r.v v0, v2
+; RV32-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 29
+; RV32-NEXT: li a3, 57
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vrgatherei16.vv v16, v24, v2
+; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 2
+; RV32-NEXT: slli a3, a1, 6
; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v4, v0.t
-; RV32-NEXT: vsetivli zero, 12, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v20, v8
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslideup.vi v16, v8, 6, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 2
+; RV32-NEXT: slli a3, a1, 5
; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: lui a1, %hi(.LCPI6_4)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_4)
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
-; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
-; RV32-NEXT: mul a1, a1, a3
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v12, v24, v8
-; RV32-NEXT: vmv1r.v v0, v3
-; RV32-NEXT: vslideup.vi v12, v16, 6, v0.t
-; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
-; RV32-NEXT: mul a1, a1, a3
-; RV32-NEXT: add a1, sp, a1
-; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_5)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_5)
+; RV32-NEXT: lui a3, %hi(.LCPI6_6)
+; RV32-NEXT: addi a3, a3, %lo(.LCPI6_6)
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v24, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI6_6)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_6)
-; RV32-NEXT: li a3, 960
-; RV32-NEXT: vle16.v v4, (a1)
-; RV32-NEXT: vmv.s.x v0, a3
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v16, (a1)
+; RV32-NEXT: vle16.v v4, (a3)
+; RV32-NEXT: li a1, 960
+; RV32-NEXT: vmv.s.x v0, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 45
+; RV32-NEXT: li a3, 13
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v24
+; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 29
+; RV32-NEXT: li a3, 73
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v4, v0.t
-; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
+; RV32-NEXT: vrgatherei16.vv v8, v24, v16
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
+; RV32-NEXT: li a3, 49
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vmv.v.v v12, v8
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v8, v16, v4, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 13
+; RV32-NEXT: li a3, 25
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_7)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_7)
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: lui a3, %hi(.LCPI6_8)
+; RV32-NEXT: addi a3, a3, %lo(.LCPI6_8)
+; RV32-NEXT: lui a4, %hi(.LCPI6_9)
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: addi a1, a4, %lo(.LCPI6_9)
+; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV32-NEXT: vle16.v v24, (a3)
+; RV32-NEXT: vle16.v v28, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
+; RV32-NEXT: li a3, 57
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v4, v16, v8
-; RV32-NEXT: vmv1r.v v0, v3
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vrgatherei16.vv v4, v0, v8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
+; RV32-NEXT: li a3, 21
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslideup.vi v4, v8, 4, v0.t
-; RV32-NEXT: lui a1, %hi(.LCPI6_8)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_8)
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v0, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI6_9)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_9)
-; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a3, a1, 6
+; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslideup.vi v4, v8, 4, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 45
+; RV32-NEXT: li a3, 21
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v0
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vs4r.v v4, (a1) # Unknown-size Folded Spill
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 73
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v24, v16, v0.t
-; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v4, v8
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; RV32-NEXT: vrgatherei16.vv v8, v0, v24
; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 13
+; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v4, (a1) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
+; RV32-NEXT: li a3, 13
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vslideup.vi v12, v8, 6
+; RV32-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_10)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_10)
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
; RV32-NEXT: vle16.v v8, (a1)
; RV32-NEXT: lui a1, 15
-; RV32-NEXT: vmv.s.x v24, a1
-; RV32-NEXT: vmv1r.v v0, v24
+; RV32-NEXT: vmv.s.x v3, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
+; RV32-NEXT: li a3, 57
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vslideup.vi v12, v16, 6
+; RV32-NEXT: vmv1r.v v0, v3
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a3, a1, 6
+; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
-; RV32-NEXT: vmv.v.v v28, v12
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 57
+; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_11)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_11)
+; RV32-NEXT: lui a3, %hi(.LCPI6_12)
+; RV32-NEXT: addi a3, a3, %lo(.LCPI6_12)
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v0, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI6_12)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_12)
-; RV32-NEXT: li a3, 1008
-; RV32-NEXT: vle16.v v4, (a1)
-; RV32-NEXT: vmv.s.x v25, a3
-; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vs1r.v v25, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vle16.v v8, (a1)
+; RV32-NEXT: vle16.v v12, (a3)
+; RV32-NEXT: li a1, 1008
+; RV32-NEXT: vmv.s.x v0, a1
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 45
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 73
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v0
-; RV32-NEXT: vmv1r.v v0, v25
+; RV32-NEXT: vrgatherei16.vv v24, v16, v8
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 29
+; RV32-NEXT: li a3, 49
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v4, v0.t
-; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v28, v8
+; RV32-NEXT: vrgatherei16.vv v24, v16, v12, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 21
-; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: slli a3, a1, 2
+; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vs4r.v v28, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, %hi(.LCPI6_13)
; RV32-NEXT: addi a1, a1, %lo(.LCPI6_13)
-; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: lui a3, %hi(.LCPI6_14)
+; RV32-NEXT: addi a3, a3, %lo(.LCPI6_14)
+; RV32-NEXT: lui a4, %hi(.LCPI6_15)
+; RV32-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV32-NEXT: vle16.v v20, (a1)
+; RV32-NEXT: addi a1, a4, %lo(.LCPI6_15)
+; RV32-NEXT: vsetvli zero, a2, e16, m4, ta, ma
+; RV32-NEXT: vle16.v v24, (a3)
; RV32-NEXT: vle16.v v8, (a1)
-; RV32-NEXT: vmv1r.v v0, v24
+; RV32-NEXT: addi a1, sp, 16
+; RV32-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vmv1r.v v0, v3
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 41
+; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a3, a1, 3
+; RV32-NEXT: slli a3, a1, 6
; RV32-NEXT: add a1, a3, a1
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; RV32-NEXT: vrgatherei16.vv v16, v8, v20, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a3, 37
+; RV32-NEXT: slli a3, a1, 5
+; RV32-NEXT: add a1, a3, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl4r.v v20, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 25
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v24, v16, v8, v0.t
-; RV32-NEXT: lui a1, %hi(.LCPI6_14)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_14)
+; RV32-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
+; RV32-NEXT: vmv.v.v v20, v8
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a3, 73
+; RV32-NEXT: mul a1, a1, a3
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, mu
-; RV32-NEXT: vle16.v v16, (a1)
-; RV32-NEXT: lui a1, %hi(.LCPI6_15)
-; RV32-NEXT: addi a1, a1, %lo(.LCPI6_15)
-; RV32-NEXT: vle16.v v28, (a1)
+; RV32-NEXT: vrgatherei16.vv v8, v0, v24
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 45
+; RV32-NEXT: slli a1, a1, 2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 49
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v0, v16
+; RV32-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV32-NEXT: addi a1, sp, 16
-; RV32-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vl4r.v v4, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vrgatherei16.vv v8, v24, v4, v0.t
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: li a2, 29
+; RV32-NEXT: li a2, 21
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
-; RV32-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV32-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
+; RV32-NEXT: vl4r.v v24, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 13
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vsetivli zero, 10, e32, m4, tu, ma
-; RV32-NEXT: vmv.v.v v24, v8
+; RV32-NEXT: vmv.v.v v24, v0
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 57
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl4r.v v28, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: slli a2, a1, 2
+; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV32-NEXT: vmv.v.v v28, v0
+; RV32-NEXT: vmv.v.v v16, v8
; RV32-NEXT: addi a1, a0, 320
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: vse32.v v24, (a1)
+; RV32-NEXT: vse32.v v16, (a1)
; RV32-NEXT: addi a1, a0, 256
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 21
-; RV32-NEXT: mul a2, a2, a3
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vse32.v v8, (a1)
+; RV32-NEXT: vse32.v v28, (a1)
; RV32-NEXT: addi a1, a0, 192
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vse32.v v8, (a1)
+; RV32-NEXT: vse32.v v24, (a1)
; RV32-NEXT: addi a1, a0, 128
-; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: li a3, 13
-; RV32-NEXT: mul a2, a2, a3
-; RV32-NEXT: add a2, sp, a2
-; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV32-NEXT: vse32.v v8, (a1)
+; RV32-NEXT: vse32.v v20, (a1)
; RV32-NEXT: addi a1, a0, 64
; RV32-NEXT: csrr a2, vlenb
-; RV32-NEXT: slli a3, a2, 2
-; RV32-NEXT: add a2, a3, a2
+; RV32-NEXT: li a3, 37
+; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT: vse32.v v8, (a1)
; RV32-NEXT: csrr a1, vlenb
-; RV32-NEXT: slli a2, a1, 4
-; RV32-NEXT: add a1, a2, a1
+; RV32-NEXT: li a2, 45
+; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 16
; RV32-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
; RV32-NEXT: vse32.v v8, (a0)
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 54
+; RV32-NEXT: li a1, 82
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 16
@@ -604,372 +639,422 @@ define {<8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>, <8 x i64>} @load_
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: li a3, 56
+; RV64-NEXT: li a3, 74
; RV64-NEXT: mul a2, a2, a3
; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: .cfi_escape 0x0f, 0x0e, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0xca, 0x00, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 74 * vlenb
; RV64-NEXT: addi a2, a1, 256
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 5
+; RV64-NEXT: li a3, 25
+; RV64-NEXT: mul a2, a2, a3
; RV64-NEXT: add a2, sp, a2
; RV64-NEXT: addi a2, a2, 16
; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; RV64-NEXT: addi a2, a1, 128
-; RV64-NEXT: vle64.v v8, (a2)
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: li a3, 40
-; RV64-NEXT: mul a2, a2, a3
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 16
-; RV64-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; RV64-NEXT: vle64.v v24, (a1)
+; RV64-NEXT: vle64.v v8, (a1)
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a3, a1, 6
+; RV64-NEXT: add a1, a3, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
-; RV64-NEXT: vrgather.vi v8, v16, 4
+; RV64-NEXT: vrgather.vi v12, v16, 4
; RV64-NEXT: li a1, 128
-; RV64-NEXT: vmv.s.x v4, a1
+; RV64-NEXT: vmv.s.x v8, a1
; RV64-NEXT: vsetivli zero, 8, e64, m8, ta, ma
; RV64-NEXT: vslidedown.vi v16, v16, 8
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 24
-; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: li a3, 49
+; RV64-NEXT: mul a1, a1, a3
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v8
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT: vmv1r.v v0, v4
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 20
-; RV64-NEXT: mul a1, a1, a2
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v4, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vrgather.vi v8, v16, 2, v0.t
-; RV64-NEXT: vmv.v.v v20, v8
+; RV64-NEXT: vrgather.vi v12, v16, 2, v0.t
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vid.v v10
; RV64-NEXT: li a1, 6
-; RV64-NEXT: vid.v v8
-; RV64-NEXT: vmul.vx v6, v8, a1
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; RV64-NEXT: vrgatherei16.vv v8, v24, v6
+; RV64-NEXT: vmul.vx v2, v10, a1
+; RV64-NEXT: li a1, 56
+; RV64-NEXT: vle64.v v16, (a2)
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: li a3, 57
+; RV64-NEXT: mul a2, a2, a3
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vmv.s.x v7, a1
+; RV64-NEXT: vadd.vi v10, v2, -16
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
-; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: li a1, 56
-; RV64-NEXT: vmv.s.x v5, a1
-; RV64-NEXT: vadd.vi v16, v6, -16
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT: vmv1r.v v0, v5
+; RV64-NEXT: vrgatherei16.vv v16, v24, v2
+; RV64-NEXT: vmv1r.v v0, v7
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 40
+; RV64-NEXT: li a2, 57
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v24, v16, v0.t
+; RV64-NEXT: vrgatherei16.vv v16, v24, v10, v0.t
; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v20, v8
+; RV64-NEXT: vmv.v.v v12, v16
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 4
+; RV64-NEXT: li a2, 21
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: li a2, 25
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v24, v16, 5
-; RV64-NEXT: vmv1r.v v0, v4
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vrgather.vi v12, v16, 5
+; RV64-NEXT: vmv1r.v v0, v8
+; RV64-NEXT: vmv1r.v v6, v8
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 24
+; RV64-NEXT: li a2, 49
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v24, v16, 3, v0.t
+; RV64-NEXT: vrgather.vi v12, v16, 3, v0.t
+; RV64-NEXT: vmv.v.v v28, v12
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v28, v6, 1
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vadd.vi v24, v2, 1
+; RV64-NEXT: vadd.vi v26, v2, -15
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
-; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v16, v28
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v28, v6, -15
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT: vmv1r.v v0, v5
+; RV64-NEXT: vrgatherei16.vv v16, v8, v24
+; RV64-NEXT: vmv1r.v v0, v7
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 40
+; RV64-NEXT: li a2, 57
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v16, v28, v0.t
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v16, v8, v26, v0.t
; RV64-NEXT: vsetivli zero, 6, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v24, v8
+; RV64-NEXT: vmv.v.v v28, v16
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 12
+; RV64-NEXT: slli a2, a1, 4
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs4r.v v28, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: lui a1, 16
+; RV64-NEXT: addi a1, a1, 7
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT: vmv.v.i v9, 6
+; RV64-NEXT: vmv.v.x v10, a1
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 25
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v24, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vmv2r.v v26, v6
-; RV64-NEXT: vadd.vi v24, v6, 2
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; RV64-NEXT: vrgatherei16.vv v12, v16, v9
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
+; RV64-NEXT: li a2, 45
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v0, v24
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vrgatherei16.vv v12, v16, v10
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 41
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vmv4r.v v8, v16
+; RV64-NEXT: vrgather.vi v12, v16, 2
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 37
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vrgather.vi v12, v16, 3
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 5
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV64-NEXT: li a1, 24
-; RV64-NEXT: vmv.s.x v0, a1
+; RV64-NEXT: vmv.s.x v1, a1
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vadd.vi v24, v2, 2
+; RV64-NEXT: vadd.vi v4, v2, -14
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vadd.vi v24, v26, -14
-; RV64-NEXT: vmv2r.v v6, v26
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; RV64-NEXT: vrgatherei16.vv v8, v16, v24, v0.t
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vmv.v.i v12, 6
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vrgatherei16.vv v8, v16, v24
+; RV64-NEXT: vmv1r.v v0, v1
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: li a2, 57
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v20, v24, v12
+; RV64-NEXT: vrgatherei16.vv v8, v24, v4, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 20
+; RV64-NEXT: li a2, 25
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v6
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 24
+; RV64-NEXT: li a2, 49
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v20, v24, 4, v0.t
-; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v20, v8
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 3
+; RV64-NEXT: li a2, 45
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vmv2r.v v10, v6
+; RV64-NEXT: vl4r.v v20, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vrgather.vi v20, v16, 4, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 6
+; RV64-NEXT: li a2, 45
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs2r.v v6, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vadd.vi v8, v6, 3
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vadd.vi v4, v2, 3
+; RV64-NEXT: vadd.vi v8, v2, -13
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
-; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v16, v0, v8
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v28, v10, -13
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vs2r.v v8, (a1) # Unknown-size Folded Spill
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vrgatherei16.vv v8, v16, v4
+; RV64-NEXT: vmv1r.v v0, v1
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 40
-; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v16, v8, v28, v0.t
-; RV64-NEXT: lui a1, 16
-; RV64-NEXT: addi a1, a1, 7
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vmv.v.x v12, a1
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vl2r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v8, v24, v16, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: slli a2, a1, 3
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vmv4r.v v8, v0
-; RV64-NEXT: vrgatherei16.vv v20, v0, v12
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vmv1r.v v0, v6
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 20
+; RV64-NEXT: li a2, 49
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v20, v24, 5, v0.t
-; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v20, v16
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 20
+; RV64-NEXT: li a2, 41
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v20, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: lui a1, 96
-; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; RV64-NEXT: vmv.v.x v12, a1
+; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
-; RV64-NEXT: li a1, 192
-; RV64-NEXT: vmv.s.x v0, a1
+; RV64-NEXT: vrgather.vi v8, v24, 5, v0.t
; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 41
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vrgather.vi v28, v8, 2
-; RV64-NEXT: vrgatherei16.vv v28, v24, v12, v0.t
-; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vs4r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: lui a1, 96
+; RV64-NEXT: li a2, 192
+; RV64-NEXT: vmv.s.x v28, a2
+; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-NEXT: vmv.v.x v8, a1
+; RV64-NEXT: vmv1r.v v0, v28
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 6
+; RV64-NEXT: li a2, 37
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl2r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vadd.vi v16, v24, 4
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vrgatherei16.vv v12, v24, v8, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
+; RV64-NEXT: li a2, 37
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v0, v16
-; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; RV64-NEXT: vs4r.v v12, (a1) # Unknown-size Folded Spill
; RV64-NEXT: li a1, 28
; RV64-NEXT: vmv.s.x v0, a1
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: vadd.vi v26, v24, -12
+; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
+; RV64-NEXT: vadd.vi v30, v2, 4
+; RV64-NEXT: vadd.vi v6, v2, -12
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vrgatherei16.vv v16, v8, v30
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 40
+; RV64-NEXT: li a2, 57
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v16, v26, v0.t
-; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v28, v8
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vrgatherei16.vv v16, v8, v6, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vs4r.v v28, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: lui a1, 112
; RV64-NEXT: addi a1, a1, 1
; RV64-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT: vmv.v.x v12, a1
+; RV64-NEXT: vmv1r.v v0, v28
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 5
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vrgatherei16.vv v16, v24, v12, v0.t
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 5
+; RV64-NEXT: slli a2, a1, 5
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgather.vi v8, v16, 3
+; RV64-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 45
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vl4r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 24
+; RV64-NEXT: li a2, 25
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v8, v16, v12, v0.t
+; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
+; RV64-NEXT: vmv.v.v v16, v24
+; RV64-NEXT: vmv2r.v v8, v2
; RV64-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v12, v24, 5
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vadd.vi v12, v2, 5
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 48
-; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: slli a2, a1, 6
+; RV64-NEXT: add a1, a2, a1
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v16, v0, v12
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; RV64-NEXT: vrgatherei16.vv v24, v0, v12
; RV64-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; RV64-NEXT: vadd.vi v12, v24, -11
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vadd.vi v2, v8, -11
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: li a2, 40
+; RV64-NEXT: li a2, 57
; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; RV64-NEXT: vrgatherei16.vv v16, v24, v12, v0.t
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; RV64-NEXT: vrgatherei16.vv v24, v8, v2, v0.t
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 41
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl4r.v v12, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 3
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetivli zero, 5, e64, m4, tu, ma
-; RV64-NEXT: vmv.v.v v8, v16
+; RV64-NEXT: vmv.v.v v12, v0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: li a2, 37
+; RV64-NEXT: mul a1, a1, a2
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl4r.v v20, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vmv.v.v v20, v0
+; RV64-NEXT: csrr a1, vlenb
+; RV64-NEXT: slli a2, a1, 5
+; RV64-NEXT: add a1, a2, a1
+; RV64-NEXT: add a1, sp, a1
+; RV64-NEXT: addi a1, a1, 16
+; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vmv.v.v v8, v24
; RV64-NEXT: addi a1, a0, 320
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vse64.v v8, (a1)
; RV64-NEXT: addi a1, a0, 256
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 1
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 16
-; RV64-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vse64.v v8, (a1)
+; RV64-NEXT: vse64.v v20, (a1)
; RV64-NEXT: addi a1, a0, 192
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: li a3, 20
-; RV64-NEXT: mul a2, a2, a3
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 16
-; RV64-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vse64.v v8, (a1)
+; RV64-NEXT: vse64.v v12, (a1)
; RV64-NEXT: addi a1, a0, 128
-; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 3
-; RV64-NEXT: add a2, sp, a2
-; RV64-NEXT: addi a2, a2, 16
-; RV64-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
-; RV64-NEXT: vse64.v v8, (a1)
+; RV64-NEXT: vse64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 64
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: li a3, 12
-; RV64-NEXT: mul a2, a2, a3
+; RV64-NEXT: slli a3, a2, 4
+; RV64-NEXT: add a2, a3, a2
; RV64-NEXT: add a2, sp, a2
; RV64-NEXT: addi a2, a2, 16
; RV64-NEXT: vl4r.v v8, (a2) # Unknown-size Folded Reload
; RV64-NEXT: vse64.v v8, (a1)
; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: slli a1, a1, 4
+; RV64-NEXT: li a2, 21
+; RV64-NEXT: mul a1, a1, a2
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl4r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vse64.v v8, (a0)
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: li a1, 56
+; RV64-NEXT: li a1, 74
; RV64-NEXT: mul a0, a0, a1
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
index d55683e653d2..9463267d0b0e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-llrint.ll
@@ -57,17 +57,17 @@ define <2 x i64> @llrint_v2i64_v2f32(<2 x float> %x) {
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: csrr a0, vlenb
@@ -118,50 +118,50 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: csrr a0, vlenb
@@ -182,17 +182,17 @@ define <3 x i64> @llrint_v3i64_v3f32(<3 x float> %x) {
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vfmv.f.s fa5, v9
; RV64-NEXT: fcvt.l.s a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.l.s a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v8, v10, a0
; RV64-NEXT: ret
%a = call <3 x i64> @llvm.llrint.v3i64.v3f32(<3 x float> %x)
@@ -224,50 +224,50 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: csrr a0, vlenb
@@ -288,17 +288,17 @@ define <4 x i64> @llrint_v4i64_v4f32(<4 x float> %x) {
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 2
; RV64-NEXT: vfmv.f.s fa5, v9
; RV64-NEXT: fcvt.l.s a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.l.s a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v8, v10, a0
; RV64-NEXT: ret
%a = call <4 x i64> @llvm.llrint.v4i64.v4f32(<4 x float> %x)
@@ -328,57 +328,57 @@ define <8 x i64> @llrint_v8i64_v8f32(<8 x float> %x) {
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 68(sp)
; RV32-NEXT: sw a0, 64(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 124(sp)
; RV32-NEXT: sw a0, 120(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 6
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 116(sp)
; RV32-NEXT: sw a0, 112(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 5
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 108(sp)
; RV32-NEXT: sw a0, 104(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 4
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 100(sp)
; RV32-NEXT: sw a0, 96(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 92(sp)
; RV32-NEXT: sw a0, 88(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 84(sp)
; RV32-NEXT: sw a0, 80(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 192
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
@@ -502,64 +502,64 @@ define <16 x i64> @llrint_v16i64_v16f32(<16 x float> %x) {
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 196(sp)
; RV32-NEXT: sw a0, 192(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 132(sp)
; RV32-NEXT: sw a0, 128(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 156(sp)
; RV32-NEXT: sw a0, 152(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 148(sp)
; RV32-NEXT: sw a0, 144(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 140(sp)
; RV32-NEXT: sw a0, 136(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 188(sp)
; RV32-NEXT: sw a0, 184(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 6
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 180(sp)
; RV32-NEXT: sw a0, 176(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 5
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
; RV32-NEXT: sw a1, 172(sp)
; RV32-NEXT: sw a0, 168(sp)
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: addi a0, sp, 384
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 4
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrintf
@@ -711,17 +711,17 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
; RV32-NEXT: vl1r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: csrr a0, vlenb
@@ -733,13 +733,12 @@ define <2 x i64> @llrint_v2i64_v2f64(<2 x double> %x) {
;
; RV64-LABEL: llrint_v2i64_v2f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vslidedown.vi v9, v8, 1
; RV64-NEXT: vfmv.f.s fa5, v9
; RV64-NEXT: fcvt.l.d a0, fa5
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.l.d a1, fa5
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vmv.v.x v8, a1
; RV64-NEXT: vslide1down.vx v8, v8, a0
; RV64-NEXT: ret
@@ -772,50 +771,50 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs2r.v v8, (a0) # Unknown-size Folded Spill
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: vslide1down.vx v8, v8, a1
; RV32-NEXT: csrr a0, vlenb
@@ -836,17 +835,13 @@ define <4 x i64> @llrint_v4i64_v4f64(<4 x double> %x) {
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v12, v8, 2
; RV64-NEXT: vfmv.f.s fa5, v12
; RV64-NEXT: fcvt.l.d a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 3
; RV64-NEXT: vfmv.f.s fa5, v8
; RV64-NEXT: fcvt.l.d a0, fa5
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vslide1down.vx v8, v10, a0
; RV64-NEXT: ret
%a = call <4 x i64> @llvm.llrint.v4i64.v4f64(<4 x double> %x)
@@ -890,32 +885,32 @@ define <8 x i64> @llrint_v8i64_v8f64(<8 x double> %x) {
; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 164(sp)
; RV32-NEXT: sw a0, 160(sp)
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 132(sp)
; RV32-NEXT: sw a0, 128(sp)
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 1
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 140(sp)
; RV32-NEXT: sw a0, 136(sp)
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
; RV32-NEXT: sw a1, 156(sp)
; RV32-NEXT: sw a0, 152(sp)
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: addi a0, sp, 256
; RV32-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 2
; RV32-NEXT: vfmv.f.s fa0, v8
; RV32-NEXT: call llrint
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
index e2075e074179..9b0944e7e2f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-lrint.ll
@@ -39,26 +39,24 @@ declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>)
define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
; RV32-LABEL: lrint_v2f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vfmv.f.s fa5, v9
; RV32-NEXT: fcvt.w.s a0, fa5
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.s a1, fa5
-; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vmv.v.x v8, a1
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: ret
;
; RV64-i32-LABEL: lrint_v2f32:
; RV64-i32: # %bb.0:
-; RV64-i32-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; RV64-i32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
; RV64-i32-NEXT: vfmv.f.s fa5, v9
; RV64-i32-NEXT: fcvt.l.s a0, fa5
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.s a1, fa5
-; RV64-i32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-i32-NEXT: vmv.v.x v8, a1
; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
; RV64-i32-NEXT: ret
@@ -83,13 +81,12 @@ declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>)
define <3 x iXLen> @lrint_v3f32(<3 x float> %x) {
; RV32-LABEL: lrint_v3f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vfmv.f.s fa5, v9
; RV32-NEXT: fcvt.w.s a0, fa5
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.s a1, fa5
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v9, a1
; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vslidedown.vi v10, v8, 2
@@ -104,13 +101,12 @@ define <3 x iXLen> @lrint_v3f32(<3 x float> %x) {
;
; RV64-i32-LABEL: lrint_v3f32:
; RV64-i32: # %bb.0:
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
; RV64-i32-NEXT: vfmv.f.s fa5, v9
; RV64-i32-NEXT: fcvt.l.s a0, fa5
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.s a1, fa5
-; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT: vmv.v.x v9, a1
; RV64-i32-NEXT: vslide1down.vx v9, v9, a0
; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
@@ -134,17 +130,17 @@ define <3 x iXLen> @lrint_v3f32(<3 x float> %x) {
; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT: vmv.v.x v10, a1
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i64-NEXT: vslidedown.vi v9, v8, 2
; RV64-i64-NEXT: vfmv.f.s fa5, v9
; RV64-i64-NEXT: fcvt.l.s a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i64-NEXT: vslidedown.vi v8, v8, 3
; RV64-i64-NEXT: vfmv.f.s fa5, v8
; RV64-i64-NEXT: fcvt.l.s a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v8, v10, a0
; RV64-i64-NEXT: ret
%a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3f32(<3 x float> %x)
@@ -155,13 +151,12 @@ declare <3 x iXLen> @llvm.lrint.v3iXLen.v3f32(<3 x float>)
define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
; RV32-LABEL: lrint_v4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vslidedown.vi v9, v8, 1
; RV32-NEXT: vfmv.f.s fa5, v9
; RV32-NEXT: fcvt.w.s a0, fa5
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.s a1, fa5
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v9, a1
; RV32-NEXT: vslide1down.vx v9, v9, a0
; RV32-NEXT: vslidedown.vi v10, v8, 2
@@ -176,13 +171,12 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
;
; RV64-i32-LABEL: lrint_v4f32:
; RV64-i32: # %bb.0:
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT: vslidedown.vi v9, v8, 1
; RV64-i32-NEXT: vfmv.f.s fa5, v9
; RV64-i32-NEXT: fcvt.l.s a0, fa5
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.s a1, fa5
-; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT: vmv.v.x v9, a1
; RV64-i32-NEXT: vslide1down.vx v9, v9, a0
; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
@@ -206,17 +200,17 @@ define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT: vmv.v.x v10, a1
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i64-NEXT: vslidedown.vi v9, v8, 2
; RV64-i64-NEXT: vfmv.f.s fa5, v9
; RV64-i64-NEXT: fcvt.l.s a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i64-NEXT: vslidedown.vi v8, v8, 3
; RV64-i64-NEXT: vfmv.f.s fa5, v8
; RV64-i64-NEXT: fcvt.l.s a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64-i64-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v8, v10, a0
; RV64-i64-NEXT: ret
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x)
@@ -248,29 +242,21 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
; RV32-NEXT: fcvt.w.s a0, fa5
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 4
; RV32-NEXT: vfmv.f.s fa5, v12
; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 5
; RV32-NEXT: vfmv.f.s fa5, v12
; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 6
; RV32-NEXT: vfmv.f.s fa5, v12
; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 7
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.s a0, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vslide1down.vx v8, v10, a0
; RV32-NEXT: ret
;
@@ -297,29 +283,21 @@ define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
; RV64-i32-NEXT: fcvt.l.s a0, fa5
; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v12, v8, 4
; RV64-i32-NEXT: vfmv.f.s fa5, v12
; RV64-i32-NEXT: fcvt.l.s a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v12, v8, 5
; RV64-i32-NEXT: vfmv.f.s fa5, v12
; RV64-i32-NEXT: fcvt.l.s a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v12, v8, 6
; RV64-i32-NEXT: vfmv.f.s fa5, v12
; RV64-i32-NEXT: fcvt.l.s a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v8, v8, 7
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.s a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT: vslide1down.vx v8, v10, a0
; RV64-i32-NEXT: ret
;
@@ -685,13 +663,12 @@ define <2 x iXLen> @lrint_v2f64(<2 x double> %x) {
;
; RV64-i64-LABEL: lrint_v2f64:
; RV64-i64: # %bb.0:
-; RV64-i64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-i64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-i64-NEXT: vslidedown.vi v9, v8, 1
; RV64-i64-NEXT: vfmv.f.s fa5, v9
; RV64-i64-NEXT: fcvt.l.d a0, fa5
; RV64-i64-NEXT: vfmv.f.s fa5, v8
; RV64-i64-NEXT: fcvt.l.d a1, fa5
-; RV64-i64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-i64-NEXT: vmv.v.x v8, a1
; RV64-i64-NEXT: vslide1down.vx v8, v8, a0
; RV64-i64-NEXT: ret
@@ -712,17 +689,17 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vmv.v.x v10, a1
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v12, v8, 2
; RV32-NEXT: vfmv.f.s fa5, v12
; RV32-NEXT: fcvt.w.d a0, fa5
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.d a0, fa5
-; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vslide1down.vx v8, v10, a0
; RV32-NEXT: ret
;
@@ -737,17 +714,17 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT: vmv.v.x v10, a1
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v12, v8, 2
; RV64-i32-NEXT: vfmv.f.s fa5, v12
; RV64-i32-NEXT: fcvt.l.d a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; RV64-i32-NEXT: vslidedown.vi v8, v8, 3
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.d a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV64-i32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64-i32-NEXT: vslide1down.vx v8, v10, a0
; RV64-i32-NEXT: ret
;
@@ -762,17 +739,13 @@ define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT: vmv.v.x v10, a1
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-i64-NEXT: vslidedown.vi v12, v8, 2
; RV64-i64-NEXT: vfmv.f.s fa5, v12
; RV64-i64-NEXT: fcvt.l.d a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i64-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64-i64-NEXT: vslidedown.vi v8, v8, 3
; RV64-i64-NEXT: vfmv.f.s fa5, v8
; RV64-i64-NEXT: fcvt.l.d a0, fa5
-; RV64-i64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT: vslide1down.vx v8, v10, a0
; RV64-i64-NEXT: ret
%a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x)
@@ -801,30 +774,27 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
; RV32-NEXT: fcvt.w.d a0, fa5
; RV32-NEXT: vfmv.f.s fa5, v8
; RV32-NEXT: fcvt.w.d a1, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vmv.v.x v10, a1
-; RV32-NEXT: vslide1down.vx v10, v10, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV32-NEXT: vslidedown.vi v12, v8, 2
-; RV32-NEXT: vfmv.f.s fa5, v12
-; RV32-NEXT: fcvt.w.d a0, fa5
-; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v10, v10, a0
; RV32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV32-NEXT: vslidedown.vi v10, v8, 2
+; RV32-NEXT: vfmv.f.s fa5, v10
+; RV32-NEXT: fcvt.w.d a2, fa5
; RV32-NEXT: vslidedown.vi v8, v8, 3
; RV32-NEXT: fld fa5, 32(sp)
; RV32-NEXT: vfmv.f.s fa4, v8
; RV32-NEXT: fld fa3, 40(sp)
-; RV32-NEXT: fcvt.w.d a0, fa4
-; RV32-NEXT: fcvt.w.d a1, fa5
-; RV32-NEXT: fld fa5, 48(sp)
-; RV32-NEXT: fcvt.w.d a2, fa3
+; RV32-NEXT: fcvt.w.d a3, fa4
+; RV32-NEXT: fcvt.w.d a4, fa5
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV32-NEXT: vslide1down.vx v8, v10, a0
+; RV32-NEXT: vmv.v.x v8, a1
+; RV32-NEXT: fcvt.w.d a1, fa3
+; RV32-NEXT: fld fa5, 48(sp)
+; RV32-NEXT: vslide1down.vx v8, v8, a0
+; RV32-NEXT: vslide1down.vx v8, v8, a2
+; RV32-NEXT: vslide1down.vx v8, v8, a3
; RV32-NEXT: fcvt.w.d a0, fa5
; RV32-NEXT: fld fa5, 56(sp)
+; RV32-NEXT: vslide1down.vx v8, v8, a4
; RV32-NEXT: vslide1down.vx v8, v8, a1
-; RV32-NEXT: vslide1down.vx v8, v8, a2
; RV32-NEXT: vslide1down.vx v8, v8, a0
; RV32-NEXT: fcvt.w.d a0, fa5
; RV32-NEXT: vslide1down.vx v8, v8, a0
@@ -854,30 +824,27 @@ define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
; RV64-i32-NEXT: fcvt.l.d a0, fa5
; RV64-i32-NEXT: vfmv.f.s fa5, v8
; RV64-i32-NEXT: fcvt.l.d a1, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64-i32-NEXT: vmv.v.x v10, a1
-; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
-; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
-; RV64-i32-NEXT: vslidedown.vi v12, v8, 2
-; RV64-i32-NEXT: vfmv.f.s fa5, v12
-; RV64-i32-NEXT: fcvt.l.d a0, fa5
-; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64-i32-NEXT: vslide1down.vx v10, v10, a0
; RV64-i32-NEXT: vsetivli zero, 1, e64, m2, ta, ma
+; RV64-i32-NEXT: vslidedown.vi v10, v8, 2
+; RV64-i32-NEXT: vfmv.f.s fa5, v10
+; RV64-i32-NEXT: fcvt.l.d a2, fa5
; RV64-i32-NEXT: vslidedown.vi v8, v8, 3
; RV64-i32-NEXT: fld fa5, 32(sp)
; RV64-i32-NEXT: vfmv.f.s fa4, v8
; RV64-i32-NEXT: fld fa3, 40(sp)
-; RV64-i32-NEXT: fcvt.l.d a0, fa4
-; RV64-i32-NEXT: fcvt.l.d a1, fa5
-; RV64-i32-NEXT: fld fa5, 48(sp)
-; RV64-i32-NEXT: fcvt.l.d a2, fa3
+; RV64-i32-NEXT: fcvt.l.d a3, fa4
+; RV64-i32-NEXT: fcvt.l.d a4, fa5
; RV64-i32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; RV64-i32-NEXT: vslide1down.vx v8, v10, a0
+; RV64-i32-NEXT: vmv.v.x v8, a1
+; RV64-i32-NEXT: fcvt.l.d a1, fa3
+; RV64-i32-NEXT: fld fa5, 48(sp)
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a2
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a3
; RV64-i32-NEXT: fcvt.l.d a0, fa5
; RV64-i32-NEXT: fld fa5, 56(sp)
+; RV64-i32-NEXT: vslide1down.vx v8, v8, a4
; RV64-i32-NEXT: vslide1down.vx v8, v8, a1
-; RV64-i32-NEXT: vslide1down.vx v8, v8, a2
; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
; RV64-i32-NEXT: fcvt.l.d a0, fa5
; RV64-i32-NEXT: vslide1down.vx v8, v8, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
index 023d707f07bf..174831518693 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-buildvec.ll
@@ -245,8 +245,8 @@ define <8 x i1> @buildvec_mask_v8i1() {
define <8 x i1> @buildvec_mask_nonconst_v8i1(i1 %x, i1 %y) {
; CHECK-LABEL: buildvec_mask_nonconst_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: li a2, 19
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a2
; CHECK-NEXT: vmv.v.x v8, a1
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
@@ -256,8 +256,8 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1(i1 %x, i1 %y) {
;
; ZVE32F-LABEL: buildvec_mask_nonconst_v8i1:
; ZVE32F: # %bb.0:
-; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; ZVE32F-NEXT: li a2, 19
+; ZVE32F-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; ZVE32F-NEXT: vmv.s.x v0, a2
; ZVE32F-NEXT: vmv.v.x v8, a1
; ZVE32F-NEXT: vmerge.vxm v8, v8, a0, v0
@@ -286,8 +286,8 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) {
; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a3
; CHECK-NEXT: vslide1down.vx v8, v8, zero
-; CHECK-NEXT: vslide1down.vx v8, v8, a2
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vslide1down.vx v8, v8, a2
; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -303,8 +303,8 @@ define <8 x i1> @buildvec_mask_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %w) {
; ZVE32F-NEXT: vslide1down.vx v9, v9, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; ZVE32F-NEXT: vslide1down.vx v8, v8, zero
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a2
; ZVE32F-NEXT: vmv.v.i v0, 15
+; ZVE32F-NEXT: vslide1down.vx v8, v8, a2
; ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; ZVE32F-NEXT: vand.vi v8, v8, 1
; ZVE32F-NEXT: vmsne.vi v0, v8, 0
@@ -331,8 +331,8 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %
; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a3
; CHECK-NEXT: vslide1down.vx v8, v8, zero
-; CHECK-NEXT: vslide1down.vx v8, v8, a2
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vslide1down.vx v8, v8, a2
; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -348,8 +348,8 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1_2(i1 %x, i1 %y, i1 %z, i1 %
; ZVE32F-NEXT: vslide1down.vx v9, v9, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; ZVE32F-NEXT: vslide1down.vx v8, v8, zero
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a2
; ZVE32F-NEXT: vmv.v.i v0, 15
+; ZVE32F-NEXT: vslide1down.vx v8, v8, a2
; ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; ZVE32F-NEXT: vand.vi v8, v8, 1
; ZVE32F-NEXT: vmsne.vi v0, v8, 0
@@ -375,8 +375,8 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1(i1 %x, i1 %y) optsize {
; CHECK-NEXT: vslide1down.vx v9, v9, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: vslide1down.vx v8, v8, a1
-; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vslide1down.vx v8, v8, a1
; CHECK-NEXT: vslidedown.vi v8, v9, 4, v0.t
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -391,8 +391,8 @@ define <8 x i1> @buildvec_mask_optsize_nonconst_v8i1(i1 %x, i1 %y) optsize {
; ZVE32F-NEXT: vslide1down.vx v9, v9, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
; ZVE32F-NEXT: vmv.v.i v0, 15
+; ZVE32F-NEXT: vslide1down.vx v8, v8, a1
; ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; ZVE32F-NEXT: vand.vi v8, v8, 1
; ZVE32F-NEXT: vmsne.vi v0, v8, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
index 7fc442c88d10..979785dd2c02 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-mask-splat.ll
@@ -24,11 +24,11 @@ define void @splat_zeros_v2i1(ptr %x) {
define void @splat_v1i1(ptr %x, i1 %y) {
; CHECK-LABEL: splat_v1i1:
; CHECK: # %bb.0:
+; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vmv.s.x v8, a1
+; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: vmv.s.x v8, zero
-; CHECK-NEXT: andi a1, a1, 1
-; CHECK-NEXT: vmv.s.x v9, a1
-; CHECK-NEXT: vmsne.vi v0, v9, 0
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index 08cad29ab1b8..db0969c85a8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -520,16 +520,16 @@ define <4 x i8> @mgather_truemask_v4i8(<4 x ptr> %ptrs, <4 x i8> %passthru) {
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: ld a1, 8(a0)
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 24(a0)
-; RV64ZVE32F-NEXT: ld a0, 16(a0)
+; RV64ZVE32F-NEXT: ld a3, 16(a0)
+; RV64ZVE32F-NEXT: ld a0, 24(a0)
; RV64ZVE32F-NEXT: lbu a1, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vlse8.v v8, (a2), zero
-; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: lbu a2, 0(a3)
+; RV64ZVE32F-NEXT: lbu a0, 0(a0)
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: ret
%v = call <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr> %ptrs, i32 1, <4 x i1> splat (i1 1), <4 x i8> %passthru)
ret <4 x i8> %v
@@ -711,8 +711,8 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: .LBB12_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB12_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -734,8 +734,8 @@ define <8 x i8> @mgather_baseidx_v8i8(ptr %base, <8 x i8> %idxs, <8 x i1> %m, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, mf2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB12_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB12_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -1209,16 +1209,16 @@ define <4 x i16> @mgather_truemask_v4i16(<4 x ptr> %ptrs, <4 x i16> %passthru) {
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: ld a1, 8(a0)
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 24(a0)
-; RV64ZVE32F-NEXT: ld a0, 16(a0)
+; RV64ZVE32F-NEXT: ld a3, 16(a0)
+; RV64ZVE32F-NEXT: ld a0, 24(a0)
; RV64ZVE32F-NEXT: lh a1, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vlse16.v v8, (a2), zero
-; RV64ZVE32F-NEXT: lh a0, 0(a0)
; RV64ZVE32F-NEXT: lh a2, 0(a3)
+; RV64ZVE32F-NEXT: lh a0, 0(a0)
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: ret
%v = call <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1), <4 x i16> %passthru)
ret <4 x i16> %v
@@ -1405,8 +1405,8 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: .LBB23_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB23_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -1430,8 +1430,8 @@ define <8 x i16> @mgather_baseidx_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB23_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB23_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -1556,8 +1556,8 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB24_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB24_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -1581,8 +1581,8 @@ define <8 x i16> @mgather_baseidx_sext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB24_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB24_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -1708,8 +1708,8 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB25_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB25_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -1734,8 +1734,8 @@ define <8 x i16> @mgather_baseidx_zext_v8i8_v8i16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB25_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB25_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -1863,8 +1863,8 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: .LBB26_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB26_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -1887,8 +1887,8 @@ define <8 x i16> @mgather_baseidx_v8i16(ptr %base, <8 x i16> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB26_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB26_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -2258,16 +2258,16 @@ define <4 x i32> @mgather_truemask_v4i32(<4 x ptr> %ptrs, <4 x i32> %passthru) {
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: ld a1, 8(a0)
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 24(a0)
-; RV64ZVE32F-NEXT: ld a0, 16(a0)
+; RV64ZVE32F-NEXT: ld a3, 16(a0)
+; RV64ZVE32F-NEXT: ld a0, 24(a0)
; RV64ZVE32F-NEXT: lw a1, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vlse32.v v8, (a2), zero
-; RV64ZVE32F-NEXT: lw a0, 0(a0)
; RV64ZVE32F-NEXT: lw a2, 0(a3)
+; RV64ZVE32F-NEXT: lw a0, 0(a0)
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: ret
%v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1), <4 x i32> %passthru)
ret <4 x i32> %v
@@ -2453,8 +2453,8 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: .LBB35_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB35_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -2478,8 +2478,8 @@ define <8 x i32> @mgather_baseidx_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB35_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB35_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -2603,8 +2603,8 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB36_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB36_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -2628,8 +2628,8 @@ define <8 x i32> @mgather_baseidx_sext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB36_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB36_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -2757,8 +2757,8 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB37_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB37_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -2783,8 +2783,8 @@ define <8 x i32> @mgather_baseidx_zext_v8i8_v8i32(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB37_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB37_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -2915,8 +2915,8 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: .LBB38_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB38_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -2940,8 +2940,8 @@ define <8 x i32> @mgather_baseidx_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB38_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB38_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -3066,8 +3066,8 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: .LBB39_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB39_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -3091,8 +3091,8 @@ define <8 x i32> @mgather_baseidx_sext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB39_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB39_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -3221,8 +3221,8 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: .LBB40_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB40_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -3247,8 +3247,8 @@ define <8 x i32> @mgather_baseidx_zext_v8i16_v8i32(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB40_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB40_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -3364,20 +3364,19 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB41_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1
; RV64ZVE32F-NEXT: .LBB41_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB41_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -3400,8 +3399,8 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB41_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB41_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -3421,14 +3420,13 @@ define <8 x i32> @mgather_baseidx_v8i32(ptr %base, <8 x i32> %idxs, <8 x i1> %m,
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB41_6
; RV64ZVE32F-NEXT: .LBB41_13: # %cond.load7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: lw a2, 0(a2)
; RV64ZVE32F-NEXT: vmv.s.x v8, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB41_7
@@ -4090,13 +4088,13 @@ define <8 x i64> @mgather_baseidx_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB48_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB48_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -4253,8 +4251,8 @@ define <8 x i64> @mgather_baseidx_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8 x i1>
; RV64ZVE32F-NEXT: .LBB48_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB48_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -4364,13 +4362,13 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB49_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB49_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -4527,8 +4525,8 @@ define <8 x i64> @mgather_baseidx_sext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB49_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB49_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -4640,13 +4638,13 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB50_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB50_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -4805,8 +4803,8 @@ define <8 x i64> @mgather_baseidx_zext_v8i8_v8i64(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB50_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB50_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -4923,13 +4921,13 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB51_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB51_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5087,8 +5085,8 @@ define <8 x i64> @mgather_baseidx_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <8 x i
; RV64ZVE32F-NEXT: .LBB51_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB51_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -5198,13 +5196,13 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB52_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB52_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5362,8 +5360,8 @@ define <8 x i64> @mgather_baseidx_sext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: .LBB52_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB52_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -5475,13 +5473,13 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB53_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB53_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5643,8 +5641,8 @@ define <8 x i64> @mgather_baseidx_zext_v8i16_v8i64(ptr %base, <8 x i16> %idxs, <
; RV64ZVE32F-NEXT: .LBB53_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a7, a6, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a7, .LBB53_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -5759,13 +5757,13 @@ define <8 x i64> @mgather_baseidx_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <8 x i
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB54_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB54_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -5923,8 +5921,8 @@ define <8 x i64> @mgather_baseidx_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <8 x i
; RV64ZVE32F-NEXT: .LBB54_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB54_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -6032,13 +6030,13 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB55_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB55_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -6196,8 +6194,8 @@ define <8 x i64> @mgather_baseidx_sext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV64ZVE32F-NEXT: .LBB55_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB55_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -6306,13 +6304,13 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB56_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a3, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a3, .LBB56_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a3, v8
; RV32ZVE32F-NEXT: lw a1, 4(a3)
; RV32ZVE32F-NEXT: lw a3, 0(a3)
@@ -6472,8 +6470,8 @@ define <8 x i64> @mgather_baseidx_zext_v8i32_v8i64(ptr %base, <8 x i32> %idxs, <
; RV64ZVE32F-NEXT: .LBB56_5: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a6, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: beqz a6, .LBB56_10
; RV64ZVE32F-NEXT: # %bb.6: # %cond.load4
@@ -6603,13 +6601,13 @@ define <8 x i64> @mgather_baseidx_v8i64(ptr %base, <8 x i64> %idxs, <8 x i1> %m,
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a5
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a4
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v0
-; RV32ZVE32F-NEXT: andi a1, t0, 1
-; RV32ZVE32F-NEXT: beqz a1, .LBB57_7
-; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
+; RV32ZVE32F-NEXT: andi a2, t0, 1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: beqz a2, .LBB57_7
+; RV32ZVE32F-NEXT: # %bb.1: # %cond.load
; RV32ZVE32F-NEXT: vmv.x.s a2, v8
; RV32ZVE32F-NEXT: lw a1, 4(a2)
; RV32ZVE32F-NEXT: lw a2, 0(a2)
@@ -7018,13 +7016,13 @@ define <4 x half> @mgather_truemask_v4f16(<4 x ptr> %ptrs, <4 x half> %passthru)
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: ld a1, 8(a0)
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 24(a0)
-; RV64ZVE32F-NEXT: ld a0, 16(a0)
+; RV64ZVE32F-NEXT: ld a3, 16(a0)
+; RV64ZVE32F-NEXT: ld a0, 24(a0)
; RV64ZVE32F-NEXT: flh fa5, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vlse16.v v8, (a2), zero
-; RV64ZVE32F-NEXT: flh fa4, 0(a0)
-; RV64ZVE32F-NEXT: flh fa3, 0(a3)
+; RV64ZVE32F-NEXT: flh fa4, 0(a3)
+; RV64ZVE32F-NEXT: flh fa3, 0(a0)
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa5
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa4
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa3
@@ -7214,8 +7212,8 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-NEXT: .LBB64_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB64_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -7239,8 +7237,8 @@ define <8 x half> @mgather_baseidx_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8 x i1
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB64_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB64_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -7365,8 +7363,8 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB65_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB65_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -7390,8 +7388,8 @@ define <8 x half> @mgather_baseidx_sext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB65_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB65_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -7517,8 +7515,8 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB66_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB66_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -7543,8 +7541,8 @@ define <8 x half> @mgather_baseidx_zext_v8i8_v8f16(ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB66_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB66_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -7672,8 +7670,8 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-NEXT: .LBB67_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB67_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -7696,8 +7694,8 @@ define <8 x half> @mgather_baseidx_v8f16(ptr %base, <8 x i16> %idxs, <8 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 6, e16, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 5
; RV64ZVE32F-NEXT: .LBB67_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB67_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -7941,13 +7939,13 @@ define <4 x float> @mgather_truemask_v4f32(<4 x ptr> %ptrs, <4 x float> %passthr
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: ld a1, 8(a0)
; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 24(a0)
-; RV64ZVE32F-NEXT: ld a0, 16(a0)
+; RV64ZVE32F-NEXT: ld a3, 16(a0)
+; RV64ZVE32F-NEXT: ld a0, 24(a0)
; RV64ZVE32F-NEXT: flw fa5, 0(a1)
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vlse32.v v8, (a2), zero
-; RV64ZVE32F-NEXT: flw fa4, 0(a0)
-; RV64ZVE32F-NEXT: flw fa3, 0(a3)
+; RV64ZVE32F-NEXT: flw fa4, 0(a3)
+; RV64ZVE32F-NEXT: flw fa3, 0(a0)
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa5
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa4
; RV64ZVE32F-NEXT: vfslide1down.vf v8, v8, fa3
@@ -8136,8 +8134,8 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: .LBB74_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB74_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8161,8 +8159,8 @@ define <8 x float> @mgather_baseidx_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB74_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB74_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -8286,8 +8284,8 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: .LBB75_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB75_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8311,8 +8309,8 @@ define <8 x float> @mgather_baseidx_sext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB75_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB75_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -8440,8 +8438,8 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: .LBB76_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB76_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8466,8 +8464,8 @@ define <8 x float> @mgather_baseidx_zext_v8i8_v8f32(ptr %base, <8 x i8> %idxs, <
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB76_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB76_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -8598,8 +8596,8 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: .LBB77_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB77_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8623,8 +8621,8 @@ define <8 x float> @mgather_baseidx_v8i16_v8f32(ptr %base, <8 x i16> %idxs, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB77_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB77_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -8749,8 +8747,8 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: .LBB78_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB78_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8774,8 +8772,8 @@ define <8 x float> @mgather_baseidx_sext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB78_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB78_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -8904,8 +8902,8 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: .LBB79_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB79_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -8930,8 +8928,8 @@ define <8 x float> @mgather_baseidx_zext_v8i16_v8f32(ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB79_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB79_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -9047,20 +9045,19 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: andi a2, a1, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB80_4
; RV64ZVE32F-NEXT: # %bb.3: # %cond.load1
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
; RV64ZVE32F-NEXT: vfmv.s.f v12, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1
; RV64ZVE32F-NEXT: .LBB80_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB80_12
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -9083,8 +9080,8 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: vsetivli zero, 6, e32, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 5
; RV64ZVE32F-NEXT: .LBB80_9: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v12, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB80_15
; RV64ZVE32F-NEXT: # %bb.10: # %else17
@@ -9104,14 +9101,13 @@ define <8 x float> @mgather_baseidx_v8f32(ptr %base, <8 x i32> %idxs, <8 x i1> %
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB80_6
; RV64ZVE32F-NEXT: .LBB80_13: # %cond.load7
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 1
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
; RV64ZVE32F-NEXT: flw fa5, 0(a2)
; RV64ZVE32F-NEXT: vfmv.s.f v8, fa5
-; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v8, 3
; RV64ZVE32F-NEXT: andi a2, a1, 16
; RV64ZVE32F-NEXT: beqz a2, .LBB80_7
@@ -9664,31 +9660,32 @@ define <8 x double> @mgather_baseidx_v8i8_v8f64(ptr %base, <8 x i8> %idxs, <8 x
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB87_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_11
; RV32ZVE32F-NEXT: .LBB87_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_12
; RV32ZVE32F-NEXT: .LBB87_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_13
; RV32ZVE32F-NEXT: .LBB87_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_14
; RV32ZVE32F-NEXT: .LBB87_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_15
; RV32ZVE32F-NEXT: .LBB87_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB87_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB87_16
; RV32ZVE32F-NEXT: .LBB87_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB87_9
; RV32ZVE32F-NEXT: .LBB87_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9706,52 +9703,51 @@ define <8 x double> @mgather_baseidx_v8i8_v8f64(ptr %base, <8 x i8> %idxs, <8 x
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB87_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_2
; RV32ZVE32F-NEXT: .LBB87_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_3
; RV32ZVE32F-NEXT: .LBB87_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_4
; RV32ZVE32F-NEXT: .LBB87_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_5
; RV32ZVE32F-NEXT: .LBB87_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_6
; RV32ZVE32F-NEXT: .LBB87_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB87_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB87_7
; RV32ZVE32F-NEXT: .LBB87_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB87_8
; RV32ZVE32F-NEXT: j .LBB87_9
;
@@ -9779,8 +9775,8 @@ define <8 x double> @mgather_baseidx_v8i8_v8f64(ptr %base, <8 x i8> %idxs, <8 x
; RV64ZVE32F-NEXT: .LBB87_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB87_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -9879,31 +9875,32 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB88_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_11
; RV32ZVE32F-NEXT: .LBB88_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_12
; RV32ZVE32F-NEXT: .LBB88_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_13
; RV32ZVE32F-NEXT: .LBB88_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_14
; RV32ZVE32F-NEXT: .LBB88_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_15
; RV32ZVE32F-NEXT: .LBB88_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB88_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB88_16
; RV32ZVE32F-NEXT: .LBB88_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB88_9
; RV32ZVE32F-NEXT: .LBB88_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9921,52 +9918,51 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB88_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_2
; RV32ZVE32F-NEXT: .LBB88_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_3
; RV32ZVE32F-NEXT: .LBB88_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_4
; RV32ZVE32F-NEXT: .LBB88_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_5
; RV32ZVE32F-NEXT: .LBB88_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_6
; RV32ZVE32F-NEXT: .LBB88_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB88_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB88_7
; RV32ZVE32F-NEXT: .LBB88_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB88_8
; RV32ZVE32F-NEXT: j .LBB88_9
;
@@ -9994,8 +9990,8 @@ define <8 x double> @mgather_baseidx_sext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB88_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB88_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -10096,31 +10092,32 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB89_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_11
; RV32ZVE32F-NEXT: .LBB89_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_12
; RV32ZVE32F-NEXT: .LBB89_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_13
; RV32ZVE32F-NEXT: .LBB89_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_14
; RV32ZVE32F-NEXT: .LBB89_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_15
; RV32ZVE32F-NEXT: .LBB89_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB89_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB89_16
; RV32ZVE32F-NEXT: .LBB89_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB89_9
; RV32ZVE32F-NEXT: .LBB89_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10138,52 +10135,51 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB89_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_2
; RV32ZVE32F-NEXT: .LBB89_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_3
; RV32ZVE32F-NEXT: .LBB89_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_4
; RV32ZVE32F-NEXT: .LBB89_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_5
; RV32ZVE32F-NEXT: .LBB89_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_6
; RV32ZVE32F-NEXT: .LBB89_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB89_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB89_7
; RV32ZVE32F-NEXT: .LBB89_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB89_8
; RV32ZVE32F-NEXT: j .LBB89_9
;
@@ -10213,8 +10209,8 @@ define <8 x double> @mgather_baseidx_zext_v8i8_v8f64(ptr %base, <8 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB89_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB89_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -10320,31 +10316,32 @@ define <8 x double> @mgather_baseidx_v8i16_v8f64(ptr %base, <8 x i16> %idxs, <8
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB90_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_11
; RV32ZVE32F-NEXT: .LBB90_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_12
; RV32ZVE32F-NEXT: .LBB90_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_13
; RV32ZVE32F-NEXT: .LBB90_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_14
; RV32ZVE32F-NEXT: .LBB90_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_15
; RV32ZVE32F-NEXT: .LBB90_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB90_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB90_16
; RV32ZVE32F-NEXT: .LBB90_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB90_9
; RV32ZVE32F-NEXT: .LBB90_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10362,52 +10359,51 @@ define <8 x double> @mgather_baseidx_v8i16_v8f64(ptr %base, <8 x i16> %idxs, <8
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB90_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_2
; RV32ZVE32F-NEXT: .LBB90_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_3
; RV32ZVE32F-NEXT: .LBB90_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_4
; RV32ZVE32F-NEXT: .LBB90_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_5
; RV32ZVE32F-NEXT: .LBB90_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_6
; RV32ZVE32F-NEXT: .LBB90_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB90_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB90_7
; RV32ZVE32F-NEXT: .LBB90_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB90_8
; RV32ZVE32F-NEXT: j .LBB90_9
;
@@ -10436,8 +10432,8 @@ define <8 x double> @mgather_baseidx_v8i16_v8f64(ptr %base, <8 x i16> %idxs, <8
; RV64ZVE32F-NEXT: .LBB90_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB90_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -10536,31 +10532,32 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB91_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_11
; RV32ZVE32F-NEXT: .LBB91_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_12
; RV32ZVE32F-NEXT: .LBB91_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_13
; RV32ZVE32F-NEXT: .LBB91_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_14
; RV32ZVE32F-NEXT: .LBB91_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_15
; RV32ZVE32F-NEXT: .LBB91_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB91_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB91_16
; RV32ZVE32F-NEXT: .LBB91_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB91_9
; RV32ZVE32F-NEXT: .LBB91_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10578,52 +10575,51 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB91_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_2
; RV32ZVE32F-NEXT: .LBB91_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_3
; RV32ZVE32F-NEXT: .LBB91_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_4
; RV32ZVE32F-NEXT: .LBB91_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_5
; RV32ZVE32F-NEXT: .LBB91_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_6
; RV32ZVE32F-NEXT: .LBB91_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB91_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB91_7
; RV32ZVE32F-NEXT: .LBB91_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB91_8
; RV32ZVE32F-NEXT: j .LBB91_9
;
@@ -10652,8 +10648,8 @@ define <8 x double> @mgather_baseidx_sext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV64ZVE32F-NEXT: .LBB91_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB91_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -10754,31 +10750,32 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB92_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_11
; RV32ZVE32F-NEXT: .LBB92_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_12
; RV32ZVE32F-NEXT: .LBB92_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_13
; RV32ZVE32F-NEXT: .LBB92_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_14
; RV32ZVE32F-NEXT: .LBB92_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_15
; RV32ZVE32F-NEXT: .LBB92_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB92_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB92_16
; RV32ZVE32F-NEXT: .LBB92_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB92_9
; RV32ZVE32F-NEXT: .LBB92_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10796,52 +10793,51 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB92_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_2
; RV32ZVE32F-NEXT: .LBB92_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_3
; RV32ZVE32F-NEXT: .LBB92_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_4
; RV32ZVE32F-NEXT: .LBB92_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_5
; RV32ZVE32F-NEXT: .LBB92_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_6
; RV32ZVE32F-NEXT: .LBB92_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB92_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB92_7
; RV32ZVE32F-NEXT: .LBB92_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB92_8
; RV32ZVE32F-NEXT: j .LBB92_9
;
@@ -10874,8 +10870,8 @@ define <8 x double> @mgather_baseidx_zext_v8i16_v8f64(ptr %base, <8 x i16> %idxs
; RV64ZVE32F-NEXT: .LBB92_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a4, a3, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a4, .LBB92_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -10979,31 +10975,32 @@ define <8 x double> @mgather_baseidx_v8i32_v8f64(ptr %base, <8 x i32> %idxs, <8
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB93_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_11
; RV32ZVE32F-NEXT: .LBB93_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_12
; RV32ZVE32F-NEXT: .LBB93_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_13
; RV32ZVE32F-NEXT: .LBB93_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_14
; RV32ZVE32F-NEXT: .LBB93_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_15
; RV32ZVE32F-NEXT: .LBB93_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB93_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB93_16
; RV32ZVE32F-NEXT: .LBB93_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB93_9
; RV32ZVE32F-NEXT: .LBB93_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -11021,52 +11018,51 @@ define <8 x double> @mgather_baseidx_v8i32_v8f64(ptr %base, <8 x i32> %idxs, <8
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB93_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_2
; RV32ZVE32F-NEXT: .LBB93_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_3
; RV32ZVE32F-NEXT: .LBB93_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_4
; RV32ZVE32F-NEXT: .LBB93_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_5
; RV32ZVE32F-NEXT: .LBB93_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_6
; RV32ZVE32F-NEXT: .LBB93_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB93_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB93_7
; RV32ZVE32F-NEXT: .LBB93_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB93_8
; RV32ZVE32F-NEXT: j .LBB93_9
;
@@ -11095,8 +11091,8 @@ define <8 x double> @mgather_baseidx_v8i32_v8f64(ptr %base, <8 x i32> %idxs, <8
; RV64ZVE32F-NEXT: .LBB93_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB93_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -11193,31 +11189,32 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB94_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_11
; RV32ZVE32F-NEXT: .LBB94_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_12
; RV32ZVE32F-NEXT: .LBB94_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_13
; RV32ZVE32F-NEXT: .LBB94_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_14
; RV32ZVE32F-NEXT: .LBB94_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_15
; RV32ZVE32F-NEXT: .LBB94_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB94_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB94_16
; RV32ZVE32F-NEXT: .LBB94_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB94_9
; RV32ZVE32F-NEXT: .LBB94_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -11235,52 +11232,51 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB94_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_2
; RV32ZVE32F-NEXT: .LBB94_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_3
; RV32ZVE32F-NEXT: .LBB94_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_4
; RV32ZVE32F-NEXT: .LBB94_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_5
; RV32ZVE32F-NEXT: .LBB94_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_6
; RV32ZVE32F-NEXT: .LBB94_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB94_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB94_7
; RV32ZVE32F-NEXT: .LBB94_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB94_8
; RV32ZVE32F-NEXT: j .LBB94_9
;
@@ -11309,8 +11305,8 @@ define <8 x double> @mgather_baseidx_sext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: .LBB94_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB94_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -11408,31 +11404,32 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB95_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_11
; RV32ZVE32F-NEXT: .LBB95_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_12
; RV32ZVE32F-NEXT: .LBB95_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_13
; RV32ZVE32F-NEXT: .LBB95_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_14
; RV32ZVE32F-NEXT: .LBB95_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_15
; RV32ZVE32F-NEXT: .LBB95_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB95_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB95_16
; RV32ZVE32F-NEXT: .LBB95_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB95_9
; RV32ZVE32F-NEXT: .LBB95_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -11450,52 +11447,51 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB95_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_2
; RV32ZVE32F-NEXT: .LBB95_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_3
; RV32ZVE32F-NEXT: .LBB95_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_4
; RV32ZVE32F-NEXT: .LBB95_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_5
; RV32ZVE32F-NEXT: .LBB95_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_6
; RV32ZVE32F-NEXT: .LBB95_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB95_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB95_7
; RV32ZVE32F-NEXT: .LBB95_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB95_8
; RV32ZVE32F-NEXT: j .LBB95_9
;
@@ -11526,8 +11522,8 @@ define <8 x double> @mgather_baseidx_zext_v8i32_v8f64(ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: .LBB95_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB95_14
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -11646,31 +11642,32 @@ define <8 x double> @mgather_baseidx_v8f64(ptr %base, <8 x i64> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a4
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi a3, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez a3, .LBB96_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_11
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_11
; RV32ZVE32F-NEXT: .LBB96_2: # %else2
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_12
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_12
; RV32ZVE32F-NEXT: .LBB96_3: # %else5
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_13
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_13
; RV32ZVE32F-NEXT: .LBB96_4: # %else8
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_14
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_14
; RV32ZVE32F-NEXT: .LBB96_5: # %else11
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_15
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_15
; RV32ZVE32F-NEXT: .LBB96_6: # %else14
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: bnez a2, .LBB96_16
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: bnez a1, .LBB96_16
; RV32ZVE32F-NEXT: .LBB96_7: # %else17
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: beqz a1, .LBB96_9
; RV32ZVE32F-NEXT: .LBB96_8: # %cond.load19
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -11688,52 +11685,51 @@ define <8 x double> @mgather_baseidx_v8f64(ptr %base, <8 x i64> %idxs, <8 x i1>
; RV32ZVE32F-NEXT: fsd fa7, 56(a0)
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB96_10: # %cond.load
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a2, v8
-; RV32ZVE32F-NEXT: fld fa0, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 2
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_2
+; RV32ZVE32F-NEXT: vmv.x.s a1, v8
+; RV32ZVE32F-NEXT: fld fa0, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 2
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_2
; RV32ZVE32F-NEXT: .LBB96_11: # %cond.load1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa1, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 4
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_3
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa1, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 4
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_3
; RV32ZVE32F-NEXT: .LBB96_12: # %cond.load4
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa2, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 8
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_4
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa2, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 8
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_4
; RV32ZVE32F-NEXT: .LBB96_13: # %cond.load7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa3, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 16
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_5
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa3, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 16
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_5
; RV32ZVE32F-NEXT: .LBB96_14: # %cond.load10
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa4, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 32
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_6
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa4, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 32
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_6
; RV32ZVE32F-NEXT: .LBB96_15: # %cond.load13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa5, 0(a2)
-; RV32ZVE32F-NEXT: andi a2, a1, 64
-; RV32ZVE32F-NEXT: beqz a2, .LBB96_7
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa5, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, 64
+; RV32ZVE32F-NEXT: beqz a1, .LBB96_7
; RV32ZVE32F-NEXT: .LBB96_16: # %cond.load16
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a2, v10
-; RV32ZVE32F-NEXT: fld fa6, 0(a2)
-; RV32ZVE32F-NEXT: andi a1, a1, -128
+; RV32ZVE32F-NEXT: vmv.x.s a1, v10
+; RV32ZVE32F-NEXT: fld fa6, 0(a1)
+; RV32ZVE32F-NEXT: andi a1, a2, -128
; RV32ZVE32F-NEXT: bnez a1, .LBB96_8
; RV32ZVE32F-NEXT: j .LBB96_9
;
@@ -11882,8 +11878,8 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: .LBB97_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB97_25
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -11900,8 +11896,8 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vmv.s.x v11, a2
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 4
; RV64ZVE32F-NEXT: .LBB97_8: # %else11
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB97_10
; RV64ZVE32F-NEXT: # %bb.9: # %cond.load13
@@ -11914,8 +11910,8 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v11, 5
; RV64ZVE32F-NEXT: .LBB97_10: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB97_27
; RV64ZVE32F-NEXT: # %bb.11: # %else17
@@ -11939,8 +11935,8 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: .LBB97_15: # %else26
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 1024
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB97_30
; RV64ZVE32F-NEXT: # %bb.16: # %else29
@@ -11962,8 +11958,8 @@ define <16 x i8> @mgather_baseidx_v16i8(ptr %base, <16 x i8> %idxs, <16 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v9, v8, 13
; RV64ZVE32F-NEXT: .LBB97_20: # %else38
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 2
; RV64ZVE32F-NEXT: bgez a2, .LBB97_22
; RV64ZVE32F-NEXT: # %bb.21: # %cond.load40
@@ -12092,22 +12088,22 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64V: # %bb.0:
; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64V-NEXT: vsext.vf8 v16, v8
-; RV64V-NEXT: vmv1r.v v12, v10
-; RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, mu
-; RV64V-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64V-NEXT: vsetivli zero, 16, e8, m2, ta, ma
-; RV64V-NEXT: vslidedown.vi v10, v10, 16
+; RV64V-NEXT: vslidedown.vi v12, v10, 16
+; RV64V-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; RV64V-NEXT: vluxei64.v v10, (a0), v16, v0.t
+; RV64V-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64V-NEXT: vslidedown.vi v8, v8, 16
-; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64V-NEXT: vsext.vf8 v16, v8
; RV64V-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64V-NEXT: vslidedown.vi v0, v0, 2
-; RV64V-NEXT: vsetivli zero, 16, e8, m1, ta, mu
-; RV64V-NEXT: vluxei64.v v10, (a0), v16, v0.t
+; RV64V-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64V-NEXT: vsext.vf8 v16, v8
+; RV64V-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; RV64V-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64V-NEXT: li a0, 32
; RV64V-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; RV64V-NEXT: vslideup.vi v12, v10, 16
-; RV64V-NEXT: vmv.v.v v8, v12
+; RV64V-NEXT: vslideup.vi v10, v12, 16
+; RV64V-NEXT: vmv.v.v v8, v10
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_baseidx_v32i8:
@@ -12139,8 +12135,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB98_49
; RV64ZVE32F-NEXT: # %bb.5: # %else5
@@ -12157,8 +12153,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 4
; RV64ZVE32F-NEXT: .LBB98_8: # %else11
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB98_10
; RV64ZVE32F-NEXT: # %bb.9: # %cond.load13
@@ -12171,8 +12167,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 5
; RV64ZVE32F-NEXT: .LBB98_10: # %else14
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB98_51
; RV64ZVE32F-NEXT: # %bb.11: # %else17
@@ -12196,8 +12192,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_15: # %else26
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 1024
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB98_17
; RV64ZVE32F-NEXT: # %bb.16: # %cond.load28
@@ -12220,8 +12216,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 11
; RV64ZVE32F-NEXT: .LBB98_19: # %else32
-; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 51
+; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 16
; RV64ZVE32F-NEXT: bgez a2, .LBB98_21
; RV64ZVE32F-NEXT: # %bb.20: # %cond.load34
@@ -12244,8 +12240,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m1, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v9, 13
; RV64ZVE32F-NEXT: .LBB98_23: # %else38
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v13, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB98_54
; RV64ZVE32F-NEXT: # %bb.24: # %else41
@@ -12269,8 +12265,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_28: # %else50
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 45
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB98_57
; RV64ZVE32F-NEXT: # %bb.29: # %else53
@@ -12287,8 +12283,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 20
; RV64ZVE32F-NEXT: .LBB98_32: # %else59
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 42
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 8
; RV64ZVE32F-NEXT: bgez a2, .LBB98_34
; RV64ZVE32F-NEXT: # %bb.33: # %cond.load61
@@ -12301,8 +12297,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 22, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 21
; RV64ZVE32F-NEXT: .LBB98_34: # %else62
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 41
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB98_59
; RV64ZVE32F-NEXT: # %bb.35: # %else65
@@ -12326,8 +12322,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: .LBB98_39: # %else74
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 37
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB98_62
; RV64ZVE32F-NEXT: # %bb.40: # %else77
@@ -12349,8 +12345,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
; RV64ZVE32F-NEXT: vsetivli zero, 30, e8, m2, tu, ma
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 29
; RV64ZVE32F-NEXT: .LBB98_44: # %else86
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 33
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 2
; RV64ZVE32F-NEXT: bgez a2, .LBB98_46
; RV64ZVE32F-NEXT: # %bb.45: # %cond.load88
@@ -12640,11 +12636,10 @@ define <4 x i32> @mgather_narrow_edge_case(ptr %base) {
; RV64V-LABEL: mgather_narrow_edge_case:
; RV64V: # %bb.0:
; RV64V-NEXT: li a1, -512
-; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; RV64V-NEXT: vmv.v.x v8, a1
; RV64V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; RV64V-NEXT: vmv.v.i v0, 5
; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
+; RV64V-NEXT: vmv.v.x v8, a1
; RV64V-NEXT: vmerge.vim v10, v8, 0, v0
; RV64V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64V-NEXT: vluxei64.v v8, (a0), v10
@@ -12728,8 +12723,8 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
; RV32-NEXT: vmv.v.x v8, a3
; RV32-NEXT: vslide1down.vx v8, v8, a5
; RV32-NEXT: vslide1down.vx v8, v8, a6
-; RV32-NEXT: vslide1down.vx v8, v8, a7
; RV32-NEXT: vmv.v.i v0, 15
+; RV32-NEXT: vslide1down.vx v8, v8, a7
; RV32-NEXT: vslidedown.vi v8, v9, 4, v0.t
; RV32-NEXT: ret
;
@@ -12803,8 +12798,8 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
; RV64V-NEXT: vmv.v.x v8, a3
; RV64V-NEXT: vslide1down.vx v8, v8, a5
; RV64V-NEXT: vslide1down.vx v8, v8, a6
-; RV64V-NEXT: vslide1down.vx v8, v8, a7
; RV64V-NEXT: vmv.v.i v0, 15
+; RV64V-NEXT: vslide1down.vx v8, v8, a7
; RV64V-NEXT: vslidedown.vi v8, v9, 4, v0.t
; RV64V-NEXT: addi sp, s0, -128
; RV64V-NEXT: ld ra, 120(sp) # 8-byte Folded Reload
@@ -12854,8 +12849,8 @@ define <8 x i16> @mgather_strided_unaligned(ptr %base) {
; RV64ZVE32F-NEXT: vmv.v.x v8, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a7
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
@@ -12896,8 +12891,8 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
@@ -12941,8 +12936,8 @@ define <8 x i16> @mgather_strided_2xSEW_with_offset(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a6
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 2, i64 3, i64 6, i64 7, i64 10, i64 11, i64 14, i64 15>
@@ -12986,8 +12981,8 @@ define <8 x i16> @mgather_reverse_unit_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a6
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 14, i64 15, i64 12, i64 13, i64 10, i64 11, i64 8, i64 9>
@@ -13031,8 +13026,8 @@ define <8 x i16> @mgather_reverse_strided_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a6
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a2
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 14, i64 15, i64 10, i64 11, i64 6, i64 7, i64 2, i64 3>
@@ -13074,8 +13069,8 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 8, i32 9, i32 4, i32 5, i32 2, i32 3>
@@ -13120,8 +13115,8 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 9, i32 10, i32 4, i32 5, i32 2, i32 3>
@@ -13167,8 +13162,8 @@ define <8 x i16> @mgather_gather_2xSEW_unaligned2(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a2
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a3
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 1, i32 2, i32 9, i32 10, i32 4, i32 5, i32 2, i32 3>
@@ -13217,8 +13212,8 @@ define <8 x i16> @mgather_gather_4xSEW(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
@@ -13264,8 +13259,8 @@ define <8 x i16> @mgather_gather_4xSEW_partial_align(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 8, i32 9, i32 10, i32 11>
@@ -13320,8 +13315,8 @@ define <8 x i16> @mgather_shuffle_rotate(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslide1down.vx v9, v9, a2
; RV64ZVE32F-NEXT: vslide1down.vx v9, v9, a3
-; RV64ZVE32F-NEXT: vslide1down.vx v9, v9, a4
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v9, v9, a4
; RV64ZVE32F-NEXT: vslidedown.vi v8, v9, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 4, i64 5, i64 6, i64 7, i64 0, i64 1, i64 2, i64 3>
@@ -13367,8 +13362,8 @@ define <8 x i16> @mgather_shuffle_vrgather(ptr %base) {
; RV64ZVE32F-NEXT: vslide1down.vx v10, v8, a4
; RV64ZVE32F-NEXT: vslide1down.vx v8, v9, a5
; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a6
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vmv.v.i v0, 15
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a0
; RV64ZVE32F-NEXT: vslidedown.vi v8, v10, 4, v0.t
; RV64ZVE32F-NEXT: ret
%ptrs = getelementptr inbounds i16, ptr %base, <8 x i64> <i64 0, i64 2, i64 3, i64 1, i64 4, i64 5, i64 6, i64 7>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
index 4bbda2152a6f..ad075e4b4e19 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-load-int.ll
@@ -401,14 +401,14 @@ define void @masked_load_v32i64(ptr %a, ptr %m_ptr, ptr %res_ptr) nounwind {
; RV32: # %bb.0:
; RV32-NEXT: addi a3, a1, 128
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vle64.v v16, (a3)
; RV32-NEXT: vle64.v v0, (a1)
+; RV32-NEXT: vle64.v v24, (a3)
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.i v24, 0
+; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vmseq.vv v8, v0, v24
-; RV32-NEXT: vmseq.vv v0, v16, v24
+; RV32-NEXT: vmseq.vv v8, v0, v16
+; RV32-NEXT: vmseq.vv v0, v24, v16
; RV32-NEXT: addi a1, a0, 128
; RV32-NEXT: vle64.v v16, (a1), v0.t
; RV32-NEXT: vmv1r.v v0, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index 42e52436a7da..e6852c1b5751 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -335,18 +335,18 @@ define void @mscatter_truemask_v4i8(<4 x i8> %val, <4 x ptr> %ptrs) {
;
; RV64ZVE32F-LABEL: mscatter_truemask_v4i8:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: ld a1, 24(a0)
-; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 16(a0)
-; RV64ZVE32F-NEXT: ld a0, 8(a0)
+; RV64ZVE32F-NEXT: ld a1, 0(a0)
+; RV64ZVE32F-NEXT: ld a2, 24(a0)
+; RV64ZVE32F-NEXT: ld a3, 8(a0)
+; RV64ZVE32F-NEXT: ld a0, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT: vse8.v v8, (a2)
+; RV64ZVE32F-NEXT: vse8.v v8, (a1)
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT: vse8.v v9, (a0)
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV64ZVE32F-NEXT: vse8.v v9, (a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT: vse8.v v9, (a0)
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3
-; RV64ZVE32F-NEXT: vse8.v v8, (a1)
+; RV64ZVE32F-NEXT: vse8.v v8, (a2)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v4i8.v4p0(<4 x i8> %val, <4 x ptr> %ptrs, i32 1, <4 x i1> splat (i1 1))
ret void
@@ -504,8 +504,8 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: .LBB9_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB9_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -526,8 +526,8 @@ define void @mscatter_baseidx_v8i8(<8 x i8> %val, ptr %base, <8 x i8> %idxs, <8
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse8.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB9_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB9_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -689,11 +689,11 @@ define void @mscatter_v2i32_truncstore_v2i16(<2 x i32> %val, <2 x ptr> %ptrs, <2
;
; RV64ZVE32F-LABEL: mscatter_v2i32_truncstore_v2i16:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a2, v0
; RV64ZVE32F-NEXT: andi a3, a2, 1
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vnsrl.wi v8, v8, 0
; RV64ZVE32F-NEXT: bnez a3, .LBB12_3
; RV64ZVE32F-NEXT: # %bb.1: # %else
; RV64ZVE32F-NEXT: andi a2, a2, 2
@@ -747,13 +747,14 @@ define void @mscatter_v2i64_truncstore_v2i16(<2 x i64> %val, <2 x ptr> %ptrs, <2
;
; RV64ZVE32F-LABEL: mscatter_v2i64_truncstore_v2i16:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.s.x v9, a1
; RV64ZVE32F-NEXT: vmv.s.x v8, a0
-; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
-; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a0, v0
; RV64ZVE32F-NEXT: andi a1, a0, 1
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
+; RV64ZVE32F-NEXT: vslideup.vi v8, v9, 1
; RV64ZVE32F-NEXT: bnez a1, .LBB13_3
; RV64ZVE32F-NEXT: # %bb.1: # %else
; RV64ZVE32F-NEXT: andi a0, a0, 2
@@ -852,18 +853,18 @@ define void @mscatter_truemask_v4i16(<4 x i16> %val, <4 x ptr> %ptrs) {
;
; RV64ZVE32F-LABEL: mscatter_truemask_v4i16:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: ld a1, 24(a0)
-; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 16(a0)
-; RV64ZVE32F-NEXT: ld a0, 8(a0)
+; RV64ZVE32F-NEXT: ld a1, 0(a0)
+; RV64ZVE32F-NEXT: ld a2, 24(a0)
+; RV64ZVE32F-NEXT: ld a3, 8(a0)
+; RV64ZVE32F-NEXT: ld a0, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vse16.v v8, (a2)
+; RV64ZVE32F-NEXT: vse16.v v8, (a1)
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT: vse16.v v9, (a0)
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV64ZVE32F-NEXT: vse16.v v9, (a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT: vse16.v v9, (a0)
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3
-; RV64ZVE32F-NEXT: vse16.v v8, (a1)
+; RV64ZVE32F-NEXT: vse16.v v8, (a2)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v4i16.v4p0(<4 x i16> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1))
ret void
@@ -1025,8 +1026,8 @@ define void @mscatter_baseidx_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: .LBB18_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB18_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -1048,8 +1049,8 @@ define void @mscatter_baseidx_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB18_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB18_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -1158,8 +1159,8 @@ define void @mscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB19_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB19_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -1181,8 +1182,8 @@ define void @mscatter_baseidx_sext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB19_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB19_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -1292,8 +1293,8 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB20_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB20_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -1316,8 +1317,8 @@ define void @mscatter_baseidx_zext_v8i8_v8i16(<8 x i16> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB20_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB20_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -1430,8 +1431,8 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: .LBB21_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB21_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -1453,8 +1454,8 @@ define void @mscatter_baseidx_v8i16(<8 x i16> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB21_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB21_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -1625,11 +1626,12 @@ define void @mscatter_v2i64_truncstore_v2i32(<2 x i64> %val, <2 x ptr> %ptrs, <2
; RV64ZVE32F: # %bb.0:
; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vmv.v.x v8, a0
-; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
; RV64ZVE32F-NEXT: vsetvli zero, zero, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vmv.x.s a0, v0
-; RV64ZVE32F-NEXT: andi a1, a0, 1
-; RV64ZVE32F-NEXT: bnez a1, .LBB24_3
+; RV64ZVE32F-NEXT: andi a4, a0, 1
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vslide1down.vx v8, v8, a1
+; RV64ZVE32F-NEXT: bnez a4, .LBB24_3
; RV64ZVE32F-NEXT: # %bb.1: # %else
; RV64ZVE32F-NEXT: andi a0, a0, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB24_4
@@ -1727,18 +1729,18 @@ define void @mscatter_truemask_v4i32(<4 x i32> %val, <4 x ptr> %ptrs) {
;
; RV64ZVE32F-LABEL: mscatter_truemask_v4i32:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: ld a1, 24(a0)
-; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 16(a0)
-; RV64ZVE32F-NEXT: ld a0, 8(a0)
+; RV64ZVE32F-NEXT: ld a1, 0(a0)
+; RV64ZVE32F-NEXT: ld a2, 24(a0)
+; RV64ZVE32F-NEXT: ld a3, 8(a0)
+; RV64ZVE32F-NEXT: ld a0, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vse32.v v8, (a2)
+; RV64ZVE32F-NEXT: vse32.v v8, (a1)
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v9, (a0)
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV64ZVE32F-NEXT: vse32.v v9, (a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT: vse32.v v9, (a0)
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v8, (a1)
+; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v4i32.v4p0(<4 x i32> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1))
ret void
@@ -1903,8 +1905,8 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: .LBB29_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB29_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -1927,8 +1929,8 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB29_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB29_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -1940,8 +1942,9 @@ define void @mscatter_baseidx_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB29_6
@@ -2039,8 +2042,8 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB30_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB30_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2063,8 +2066,8 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB30_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB30_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -2076,8 +2079,9 @@ define void @mscatter_baseidx_sext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB30_6
@@ -2179,8 +2183,8 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB31_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB31_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2204,8 +2208,8 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB31_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB31_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -2218,8 +2222,9 @@ define void @mscatter_baseidx_zext_v8i8_v8i32(<8 x i32> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB31_6
@@ -2323,8 +2328,8 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: .LBB32_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB32_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2347,8 +2352,8 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB32_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB32_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -2360,8 +2365,9 @@ define void @mscatter_baseidx_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB32_6
@@ -2460,8 +2466,8 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB33_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB33_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2484,8 +2490,8 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB33_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB33_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -2497,8 +2503,9 @@ define void @mscatter_baseidx_sext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB33_6
@@ -2601,8 +2608,8 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB34_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB34_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2626,8 +2633,8 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: .LBB34_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB34_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -2640,8 +2647,9 @@ define void @mscatter_baseidx_zext_v8i16_v8i32(<8 x i32> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 8
; RV64ZVE32F-NEXT: beqz a3, .LBB34_6
@@ -2742,8 +2750,8 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs,
; RV64ZVE32F-NEXT: .LBB35_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB35_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -2766,8 +2774,8 @@ define void @mscatter_baseidx_v8i32(<8 x i32> %val, ptr %base, <8 x i32> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB35_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v12, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB35_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -3074,17 +3082,17 @@ define void @mscatter_truemask_v4i64(<4 x i64> %val, <4 x ptr> %ptrs) {
; RV32ZVE32F-NEXT: lw a3, 20(a0)
; RV32ZVE32F-NEXT: lw a4, 16(a0)
; RV32ZVE32F-NEXT: lw a5, 12(a0)
-; RV32ZVE32F-NEXT: lw a6, 8(a0)
-; RV32ZVE32F-NEXT: lw a7, 0(a0)
-; RV32ZVE32F-NEXT: lw a0, 4(a0)
+; RV32ZVE32F-NEXT: lw a6, 0(a0)
+; RV32ZVE32F-NEXT: lw a7, 4(a0)
+; RV32ZVE32F-NEXT: lw a0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s t0, v8
-; RV32ZVE32F-NEXT: sw a7, 0(t0)
-; RV32ZVE32F-NEXT: sw a0, 4(t0)
+; RV32ZVE32F-NEXT: sw a6, 0(t0)
+; RV32ZVE32F-NEXT: sw a7, 4(t0)
; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a0, v9
-; RV32ZVE32F-NEXT: sw a6, 0(a0)
-; RV32ZVE32F-NEXT: sw a5, 4(a0)
+; RV32ZVE32F-NEXT: vmv.x.s a6, v9
+; RV32ZVE32F-NEXT: sw a0, 0(a6)
+; RV32ZVE32F-NEXT: sw a5, 4(a6)
; RV32ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v9
; RV32ZVE32F-NEXT: sw a4, 0(a0)
@@ -3383,42 +3391,43 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB42_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB42_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB42_11
; RV32ZVE32F-NEXT: .LBB42_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB42_12
; RV32ZVE32F-NEXT: .LBB42_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB42_13
; RV32ZVE32F-NEXT: .LBB42_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB42_14
; RV32ZVE32F-NEXT: .LBB42_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB42_15
; RV32ZVE32F-NEXT: .LBB42_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB42_16
; RV32ZVE32F-NEXT: .LBB42_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB42_9
; RV32ZVE32F-NEXT: .LBB42_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3433,45 +3442,44 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB42_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB42_2
; RV32ZVE32F-NEXT: .LBB42_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB42_3
; RV32ZVE32F-NEXT: .LBB42_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB42_4
; RV32ZVE32F-NEXT: .LBB42_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB42_5
; RV32ZVE32F-NEXT: .LBB42_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB42_6
; RV32ZVE32F-NEXT: .LBB42_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3479,7 +3487,7 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB42_7
; RV32ZVE32F-NEXT: .LBB42_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3487,7 +3495,7 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB42_8
; RV32ZVE32F-NEXT: j .LBB42_9
;
@@ -3523,8 +3531,8 @@ define void @mscatter_baseidx_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8> %id
; RV64ZVE32F-NEXT: .LBB42_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB42_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -3627,42 +3635,43 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB43_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB43_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB43_11
; RV32ZVE32F-NEXT: .LBB43_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB43_12
; RV32ZVE32F-NEXT: .LBB43_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB43_13
; RV32ZVE32F-NEXT: .LBB43_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB43_14
; RV32ZVE32F-NEXT: .LBB43_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB43_15
; RV32ZVE32F-NEXT: .LBB43_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB43_16
; RV32ZVE32F-NEXT: .LBB43_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB43_9
; RV32ZVE32F-NEXT: .LBB43_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3677,45 +3686,44 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB43_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB43_2
; RV32ZVE32F-NEXT: .LBB43_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB43_3
; RV32ZVE32F-NEXT: .LBB43_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB43_4
; RV32ZVE32F-NEXT: .LBB43_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB43_5
; RV32ZVE32F-NEXT: .LBB43_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB43_6
; RV32ZVE32F-NEXT: .LBB43_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3723,7 +3731,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB43_7
; RV32ZVE32F-NEXT: .LBB43_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3731,7 +3739,7 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB43_8
; RV32ZVE32F-NEXT: j .LBB43_9
;
@@ -3767,8 +3775,8 @@ define void @mscatter_baseidx_sext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB43_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB43_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -3873,42 +3881,43 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB44_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB44_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB44_11
; RV32ZVE32F-NEXT: .LBB44_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB44_12
; RV32ZVE32F-NEXT: .LBB44_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB44_13
; RV32ZVE32F-NEXT: .LBB44_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB44_14
; RV32ZVE32F-NEXT: .LBB44_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB44_15
; RV32ZVE32F-NEXT: .LBB44_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB44_16
; RV32ZVE32F-NEXT: .LBB44_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB44_9
; RV32ZVE32F-NEXT: .LBB44_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3923,45 +3932,44 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB44_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB44_2
; RV32ZVE32F-NEXT: .LBB44_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB44_3
; RV32ZVE32F-NEXT: .LBB44_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB44_4
; RV32ZVE32F-NEXT: .LBB44_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB44_5
; RV32ZVE32F-NEXT: .LBB44_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB44_6
; RV32ZVE32F-NEXT: .LBB44_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3969,7 +3977,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB44_7
; RV32ZVE32F-NEXT: .LBB44_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -3977,7 +3985,7 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB44_8
; RV32ZVE32F-NEXT: j .LBB44_9
;
@@ -4015,8 +4023,8 @@ define void @mscatter_baseidx_zext_v8i8_v8i64(<8 x i64> %val, ptr %base, <8 x i8
; RV64ZVE32F-NEXT: .LBB44_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB44_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -4126,42 +4134,43 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB45_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB45_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB45_11
; RV32ZVE32F-NEXT: .LBB45_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB45_12
; RV32ZVE32F-NEXT: .LBB45_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB45_13
; RV32ZVE32F-NEXT: .LBB45_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB45_14
; RV32ZVE32F-NEXT: .LBB45_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB45_15
; RV32ZVE32F-NEXT: .LBB45_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB45_16
; RV32ZVE32F-NEXT: .LBB45_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB45_9
; RV32ZVE32F-NEXT: .LBB45_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4176,45 +4185,44 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB45_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB45_2
; RV32ZVE32F-NEXT: .LBB45_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB45_3
; RV32ZVE32F-NEXT: .LBB45_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB45_4
; RV32ZVE32F-NEXT: .LBB45_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB45_5
; RV32ZVE32F-NEXT: .LBB45_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB45_6
; RV32ZVE32F-NEXT: .LBB45_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4222,7 +4230,7 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB45_7
; RV32ZVE32F-NEXT: .LBB45_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4230,7 +4238,7 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB45_8
; RV32ZVE32F-NEXT: j .LBB45_9
;
@@ -4267,8 +4275,8 @@ define void @mscatter_baseidx_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i16> %
; RV64ZVE32F-NEXT: .LBB45_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB45_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -4371,42 +4379,43 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB46_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB46_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB46_11
; RV32ZVE32F-NEXT: .LBB46_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB46_12
; RV32ZVE32F-NEXT: .LBB46_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB46_13
; RV32ZVE32F-NEXT: .LBB46_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB46_14
; RV32ZVE32F-NEXT: .LBB46_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB46_15
; RV32ZVE32F-NEXT: .LBB46_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB46_16
; RV32ZVE32F-NEXT: .LBB46_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB46_9
; RV32ZVE32F-NEXT: .LBB46_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4421,45 +4430,44 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB46_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB46_2
; RV32ZVE32F-NEXT: .LBB46_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB46_3
; RV32ZVE32F-NEXT: .LBB46_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB46_4
; RV32ZVE32F-NEXT: .LBB46_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB46_5
; RV32ZVE32F-NEXT: .LBB46_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB46_6
; RV32ZVE32F-NEXT: .LBB46_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4467,7 +4475,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB46_7
; RV32ZVE32F-NEXT: .LBB46_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4475,7 +4483,7 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB46_8
; RV32ZVE32F-NEXT: j .LBB46_9
;
@@ -4512,8 +4520,8 @@ define void @mscatter_baseidx_sext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB46_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB46_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -4618,42 +4626,43 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB47_10
+; RV32ZVE32F-NEXT: vmv.x.s t0, v0
+; RV32ZVE32F-NEXT: andi s2, t0, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB47_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB47_11
; RV32ZVE32F-NEXT: .LBB47_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB47_12
; RV32ZVE32F-NEXT: .LBB47_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB47_13
; RV32ZVE32F-NEXT: .LBB47_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB47_14
; RV32ZVE32F-NEXT: .LBB47_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB47_15
; RV32ZVE32F-NEXT: .LBB47_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB47_16
; RV32ZVE32F-NEXT: .LBB47_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB47_9
; RV32ZVE32F-NEXT: .LBB47_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4668,45 +4677,44 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB47_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, t0, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB47_2
; RV32ZVE32F-NEXT: .LBB47_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB47_3
; RV32ZVE32F-NEXT: .LBB47_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB47_4
; RV32ZVE32F-NEXT: .LBB47_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB47_5
; RV32ZVE32F-NEXT: .LBB47_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, t0, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB47_6
; RV32ZVE32F-NEXT: .LBB47_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4714,7 +4722,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a7, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, t0, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB47_7
; RV32ZVE32F-NEXT: .LBB47_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4722,7 +4730,7 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, t0, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB47_8
; RV32ZVE32F-NEXT: j .LBB47_9
;
@@ -4763,8 +4771,8 @@ define void @mscatter_baseidx_zext_v8i16_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB47_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a0, a5, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB47_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -4872,42 +4880,43 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV32ZVE32F-NEXT: lw a4, 52(a0)
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
-; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t0, 40(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB48_10
+; RV32ZVE32F-NEXT: vmv.x.s a7, v0
+; RV32ZVE32F-NEXT: andi s2, a7, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB48_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB48_11
; RV32ZVE32F-NEXT: .LBB48_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB48_12
; RV32ZVE32F-NEXT: .LBB48_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB48_13
; RV32ZVE32F-NEXT: .LBB48_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB48_14
; RV32ZVE32F-NEXT: .LBB48_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB48_15
; RV32ZVE32F-NEXT: .LBB48_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB48_16
; RV32ZVE32F-NEXT: .LBB48_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB48_9
; RV32ZVE32F-NEXT: .LBB48_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4922,53 +4931,52 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB48_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB48_2
; RV32ZVE32F-NEXT: .LBB48_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB48_3
; RV32ZVE32F-NEXT: .LBB48_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB48_4
; RV32ZVE32F-NEXT: .LBB48_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB48_5
; RV32ZVE32F-NEXT: .LBB48_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB48_6
; RV32ZVE32F-NEXT: .LBB48_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw a7, 0(a0)
+; RV32ZVE32F-NEXT: sw t0, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB48_7
; RV32ZVE32F-NEXT: .LBB48_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -4976,7 +4984,7 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB48_8
; RV32ZVE32F-NEXT: j .LBB48_9
;
@@ -5013,8 +5021,8 @@ define void @mscatter_baseidx_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i32> %
; RV64ZVE32F-NEXT: .LBB48_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB48_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -5115,42 +5123,43 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: lw a4, 52(a0)
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
-; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t0, 40(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB49_10
+; RV32ZVE32F-NEXT: vmv.x.s a7, v0
+; RV32ZVE32F-NEXT: andi s2, a7, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB49_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB49_11
; RV32ZVE32F-NEXT: .LBB49_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB49_12
; RV32ZVE32F-NEXT: .LBB49_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB49_13
; RV32ZVE32F-NEXT: .LBB49_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB49_14
; RV32ZVE32F-NEXT: .LBB49_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB49_15
; RV32ZVE32F-NEXT: .LBB49_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB49_16
; RV32ZVE32F-NEXT: .LBB49_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB49_9
; RV32ZVE32F-NEXT: .LBB49_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5165,53 +5174,52 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB49_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB49_2
; RV32ZVE32F-NEXT: .LBB49_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB49_3
; RV32ZVE32F-NEXT: .LBB49_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB49_4
; RV32ZVE32F-NEXT: .LBB49_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB49_5
; RV32ZVE32F-NEXT: .LBB49_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB49_6
; RV32ZVE32F-NEXT: .LBB49_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw a7, 0(a0)
+; RV32ZVE32F-NEXT: sw t0, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB49_7
; RV32ZVE32F-NEXT: .LBB49_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5219,7 +5227,7 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB49_8
; RV32ZVE32F-NEXT: j .LBB49_9
;
@@ -5256,8 +5264,8 @@ define void @mscatter_baseidx_sext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB49_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB49_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -5359,42 +5367,43 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: lw a4, 52(a0)
; RV32ZVE32F-NEXT: lw a5, 48(a0)
; RV32ZVE32F-NEXT: lw a6, 44(a0)
-; RV32ZVE32F-NEXT: lw a7, 40(a0)
-; RV32ZVE32F-NEXT: lw t0, 36(a0)
-; RV32ZVE32F-NEXT: lw t1, 32(a0)
-; RV32ZVE32F-NEXT: lw t2, 28(a0)
-; RV32ZVE32F-NEXT: lw t3, 24(a0)
-; RV32ZVE32F-NEXT: lw t4, 20(a0)
-; RV32ZVE32F-NEXT: lw t5, 16(a0)
-; RV32ZVE32F-NEXT: lw s0, 12(a0)
-; RV32ZVE32F-NEXT: lw t6, 8(a0)
+; RV32ZVE32F-NEXT: lw t0, 40(a0)
+; RV32ZVE32F-NEXT: lw t1, 36(a0)
+; RV32ZVE32F-NEXT: lw t2, 32(a0)
+; RV32ZVE32F-NEXT: lw t3, 28(a0)
+; RV32ZVE32F-NEXT: lw t4, 24(a0)
+; RV32ZVE32F-NEXT: lw t5, 20(a0)
+; RV32ZVE32F-NEXT: lw t6, 16(a0)
+; RV32ZVE32F-NEXT: lw s1, 12(a0)
+; RV32ZVE32F-NEXT: lw s0, 8(a0)
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi s1, a1, 1
-; RV32ZVE32F-NEXT: bnez s1, .LBB50_10
+; RV32ZVE32F-NEXT: vmv.x.s a7, v0
+; RV32ZVE32F-NEXT: andi s2, a7, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB50_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB50_11
; RV32ZVE32F-NEXT: .LBB50_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB50_12
; RV32ZVE32F-NEXT: .LBB50_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB50_13
; RV32ZVE32F-NEXT: .LBB50_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB50_14
; RV32ZVE32F-NEXT: .LBB50_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB50_15
; RV32ZVE32F-NEXT: .LBB50_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB50_16
; RV32ZVE32F-NEXT: .LBB50_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB50_9
; RV32ZVE32F-NEXT: .LBB50_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5409,53 +5418,52 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: addi sp, sp, 16
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB50_10: # %cond.store
-; RV32ZVE32F-NEXT: lw s1, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw s1, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a7, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB50_2
; RV32ZVE32F-NEXT: .LBB50_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw s0, 4(a0)
-; RV32ZVE32F-NEXT: sw t6, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: sw s1, 4(a0)
+; RV32ZVE32F-NEXT: sw s0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB50_3
; RV32ZVE32F-NEXT: .LBB50_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t5, 0(a0)
-; RV32ZVE32F-NEXT: sw t4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: sw t6, 0(a0)
+; RV32ZVE32F-NEXT: sw t5, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB50_4
; RV32ZVE32F-NEXT: .LBB50_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t3, 0(a0)
-; RV32ZVE32F-NEXT: sw t2, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: sw t4, 0(a0)
+; RV32ZVE32F-NEXT: sw t3, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB50_5
; RV32ZVE32F-NEXT: .LBB50_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw t1, 0(a0)
-; RV32ZVE32F-NEXT: sw t0, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: sw t2, 0(a0)
+; RV32ZVE32F-NEXT: sw t1, 4(a0)
+; RV32ZVE32F-NEXT: andi a0, a7, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB50_6
; RV32ZVE32F-NEXT: .LBB50_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
-; RV32ZVE32F-NEXT: sw a7, 0(a0)
+; RV32ZVE32F-NEXT: sw t0, 0(a0)
; RV32ZVE32F-NEXT: sw a6, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a7, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB50_7
; RV32ZVE32F-NEXT: .LBB50_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5463,7 +5471,7 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a5, 0(a0)
; RV32ZVE32F-NEXT: sw a4, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a7, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB50_8
; RV32ZVE32F-NEXT: j .LBB50_9
;
@@ -5502,8 +5510,8 @@ define void @mscatter_baseidx_zext_v8i32_v8i64(<8 x i64> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB50_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a0, a4, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a0, .LBB50_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -5647,31 +5655,32 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, s3
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, s2
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v0
-; RV32ZVE32F-NEXT: andi a2, a1, 1
-; RV32ZVE32F-NEXT: bnez a2, .LBB51_10
+; RV32ZVE32F-NEXT: vmv.x.s a2, v0
+; RV32ZVE32F-NEXT: andi s2, a2, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a1
+; RV32ZVE32F-NEXT: bnez s2, .LBB51_10
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a2, 2
; RV32ZVE32F-NEXT: bnez a0, .LBB51_11
; RV32ZVE32F-NEXT: .LBB51_2: # %else2
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, a2, 4
; RV32ZVE32F-NEXT: bnez a0, .LBB51_12
; RV32ZVE32F-NEXT: .LBB51_3: # %else4
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, a2, 8
; RV32ZVE32F-NEXT: bnez a0, .LBB51_13
; RV32ZVE32F-NEXT: .LBB51_4: # %else6
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, a2, 16
; RV32ZVE32F-NEXT: bnez a0, .LBB51_14
; RV32ZVE32F-NEXT: .LBB51_5: # %else8
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, a2, 32
; RV32ZVE32F-NEXT: bnez a0, .LBB51_15
; RV32ZVE32F-NEXT: .LBB51_6: # %else10
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a2, 64
; RV32ZVE32F-NEXT: bnez a0, .LBB51_16
; RV32ZVE32F-NEXT: .LBB51_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a2, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB51_9
; RV32ZVE32F-NEXT: .LBB51_8: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5691,13 +5700,12 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: addi sp, sp, 32
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB51_10: # %cond.store
-; RV32ZVE32F-NEXT: lw a2, 4(a0)
+; RV32ZVE32F-NEXT: lw a1, 4(a0)
; RV32ZVE32F-NEXT: lw a0, 0(a0)
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vmv.x.s s2, v8
-; RV32ZVE32F-NEXT: sw a2, 4(s2)
+; RV32ZVE32F-NEXT: sw a1, 4(s2)
; RV32ZVE32F-NEXT: sw a0, 0(s2)
-; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: andi a0, a2, 2
; RV32ZVE32F-NEXT: beqz a0, .LBB51_2
; RV32ZVE32F-NEXT: .LBB51_11: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -5705,7 +5713,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw s1, 4(a0)
; RV32ZVE32F-NEXT: sw s0, 0(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: andi a0, a2, 4
; RV32ZVE32F-NEXT: beqz a0, .LBB51_3
; RV32ZVE32F-NEXT: .LBB51_12: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -5713,7 +5721,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t6, 0(a0)
; RV32ZVE32F-NEXT: sw t5, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: andi a0, a2, 8
; RV32ZVE32F-NEXT: beqz a0, .LBB51_4
; RV32ZVE32F-NEXT: .LBB51_13: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
@@ -5721,7 +5729,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t4, 0(a0)
; RV32ZVE32F-NEXT: sw t3, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: andi a0, a2, 16
; RV32ZVE32F-NEXT: beqz a0, .LBB51_5
; RV32ZVE32F-NEXT: .LBB51_14: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5729,7 +5737,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t2, 0(a0)
; RV32ZVE32F-NEXT: sw t1, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: andi a0, a2, 32
; RV32ZVE32F-NEXT: beqz a0, .LBB51_6
; RV32ZVE32F-NEXT: .LBB51_15: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5737,7 +5745,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw t0, 0(a0)
; RV32ZVE32F-NEXT: sw a7, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: andi a0, a2, 64
; RV32ZVE32F-NEXT: beqz a0, .LBB51_7
; RV32ZVE32F-NEXT: .LBB51_16: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -5745,7 +5753,7 @@ define void @mscatter_baseidx_v8i64(<8 x i64> %val, ptr %base, <8 x i64> %idxs,
; RV32ZVE32F-NEXT: vmv.x.s a0, v10
; RV32ZVE32F-NEXT: sw a6, 0(a0)
; RV32ZVE32F-NEXT: sw a5, 4(a0)
-; RV32ZVE32F-NEXT: andi a0, a1, -128
+; RV32ZVE32F-NEXT: andi a0, a2, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB51_8
; RV32ZVE32F-NEXT: j .LBB51_9
;
@@ -6019,18 +6027,18 @@ define void @mscatter_truemask_v4f16(<4 x half> %val, <4 x ptr> %ptrs) {
;
; RV64ZVE32F-LABEL: mscatter_truemask_v4f16:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: ld a1, 24(a0)
-; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 16(a0)
-; RV64ZVE32F-NEXT: ld a0, 8(a0)
+; RV64ZVE32F-NEXT: ld a1, 0(a0)
+; RV64ZVE32F-NEXT: ld a2, 24(a0)
+; RV64ZVE32F-NEXT: ld a3, 8(a0)
+; RV64ZVE32F-NEXT: ld a0, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
-; RV64ZVE32F-NEXT: vse16.v v8, (a2)
+; RV64ZVE32F-NEXT: vse16.v v8, (a1)
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT: vse16.v v9, (a0)
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV64ZVE32F-NEXT: vse16.v v9, (a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT: vse16.v v9, (a0)
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3
-; RV64ZVE32F-NEXT: vse16.v v8, (a1)
+; RV64ZVE32F-NEXT: vse16.v v8, (a2)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v4f16.v4p0(<4 x half> %val, <4 x ptr> %ptrs, i32 2, <4 x i1> splat (i1 1))
ret void
@@ -6192,8 +6200,8 @@ define void @mscatter_baseidx_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i8> %i
; RV64ZVE32F-NEXT: .LBB58_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB58_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -6215,8 +6223,8 @@ define void @mscatter_baseidx_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i8> %i
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB58_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB58_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -6325,8 +6333,8 @@ define void @mscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB59_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB59_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -6348,8 +6356,8 @@ define void @mscatter_baseidx_sext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB59_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB59_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -6459,8 +6467,8 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: .LBB60_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB60_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -6483,8 +6491,8 @@ define void @mscatter_baseidx_zext_v8i8_v8f16(<8 x half> %val, ptr %base, <8 x i
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB60_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB60_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -6597,8 +6605,8 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: .LBB61_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB61_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -6620,8 +6628,8 @@ define void @mscatter_baseidx_v8f16(<8 x half> %val, ptr %base, <8 x i16> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 5
; RV64ZVE32F-NEXT: vse16.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB61_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB61_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -6841,18 +6849,18 @@ define void @mscatter_truemask_v4f32(<4 x float> %val, <4 x ptr> %ptrs) {
;
; RV64ZVE32F-LABEL: mscatter_truemask_v4f32:
; RV64ZVE32F: # %bb.0:
-; RV64ZVE32F-NEXT: ld a1, 24(a0)
-; RV64ZVE32F-NEXT: ld a2, 0(a0)
-; RV64ZVE32F-NEXT: ld a3, 16(a0)
-; RV64ZVE32F-NEXT: ld a0, 8(a0)
+; RV64ZVE32F-NEXT: ld a1, 0(a0)
+; RV64ZVE32F-NEXT: ld a2, 24(a0)
+; RV64ZVE32F-NEXT: ld a3, 8(a0)
+; RV64ZVE32F-NEXT: ld a0, 16(a0)
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
-; RV64ZVE32F-NEXT: vse32.v v8, (a2)
+; RV64ZVE32F-NEXT: vse32.v v8, (a1)
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 1
-; RV64ZVE32F-NEXT: vse32.v v9, (a0)
-; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
; RV64ZVE32F-NEXT: vse32.v v9, (a3)
+; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 2
+; RV64ZVE32F-NEXT: vse32.v v9, (a0)
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 3
-; RV64ZVE32F-NEXT: vse32.v v8, (a1)
+; RV64ZVE32F-NEXT: vse32.v v8, (a2)
; RV64ZVE32F-NEXT: ret
call void @llvm.masked.scatter.v4f32.v4p0(<4 x float> %val, <4 x ptr> %ptrs, i32 4, <4 x i1> splat (i1 1))
ret void
@@ -7017,8 +7025,8 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: .LBB68_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB68_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7041,8 +7049,8 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB68_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB68_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7054,8 +7062,9 @@ define void @mscatter_baseidx_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x i8> %
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB68_6
@@ -7153,8 +7162,8 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB69_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB69_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7177,8 +7186,8 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB69_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB69_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7190,8 +7199,9 @@ define void @mscatter_baseidx_sext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB69_6
@@ -7293,8 +7303,8 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB70_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB70_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7318,8 +7328,8 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB70_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB70_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7332,8 +7342,9 @@ define void @mscatter_baseidx_zext_v8i8_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: andi a2, a2, 255
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB70_6
@@ -7437,8 +7448,8 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: .LBB71_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB71_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7461,8 +7472,8 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB71_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB71_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7474,8 +7485,9 @@ define void @mscatter_baseidx_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x i16>
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB71_6
@@ -7574,8 +7586,8 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB72_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB72_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7598,8 +7610,8 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB72_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB72_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7611,8 +7623,9 @@ define void @mscatter_baseidx_sext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vmv.x.s a2, v10
; RV64ZVE32F-NEXT: slli a2, a2, 2
; RV64ZVE32F-NEXT: add a2, a0, a2
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a2)
; RV64ZVE32F-NEXT: andi a2, a1, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB72_6
@@ -7715,8 +7728,8 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB73_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB73_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7740,8 +7753,8 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: .LBB73_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB73_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -7754,8 +7767,9 @@ define void @mscatter_baseidx_zext_v8i16_v8f32(<8 x float> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: and a3, a3, a1
; RV64ZVE32F-NEXT: slli a3, a3, 2
; RV64ZVE32F-NEXT: add a3, a0, a3
-; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV64ZVE32F-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 2
+; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v12, (a3)
; RV64ZVE32F-NEXT: andi a3, a2, 8
; RV64ZVE32F-NEXT: beqz a3, .LBB73_6
@@ -7856,8 +7870,8 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: .LBB74_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB74_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -7880,8 +7894,8 @@ define void @mscatter_baseidx_v8f32(<8 x float> %val, ptr %base, <8 x i32> %idxs
; RV64ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vse32.v v10, (a2)
; RV64ZVE32F-NEXT: .LBB74_9: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v12, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB74_15
; RV64ZVE32F-NEXT: # %bb.10: # %else12
@@ -8391,81 +8405,81 @@ define void @mscatter_baseidx_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x i8>
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB81_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_10
; RV32ZVE32F-NEXT: .LBB81_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_11
; RV32ZVE32F-NEXT: .LBB81_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_12
; RV32ZVE32F-NEXT: .LBB81_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_13
; RV32ZVE32F-NEXT: .LBB81_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_14
; RV32ZVE32F-NEXT: .LBB81_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB81_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB81_15
; RV32ZVE32F-NEXT: .LBB81_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB81_16
; RV32ZVE32F-NEXT: .LBB81_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB81_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_2
; RV32ZVE32F-NEXT: .LBB81_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_3
; RV32ZVE32F-NEXT: .LBB81_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_4
; RV32ZVE32F-NEXT: .LBB81_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_5
; RV32ZVE32F-NEXT: .LBB81_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_6
; RV32ZVE32F-NEXT: .LBB81_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB81_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB81_7
; RV32ZVE32F-NEXT: .LBB81_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB81_8
; RV32ZVE32F-NEXT: .LBB81_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -8498,8 +8512,8 @@ define void @mscatter_baseidx_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x i8>
; RV64ZVE32F-NEXT: .LBB81_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB81_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -8591,81 +8605,81 @@ define void @mscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB82_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_10
; RV32ZVE32F-NEXT: .LBB82_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_11
; RV32ZVE32F-NEXT: .LBB82_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_12
; RV32ZVE32F-NEXT: .LBB82_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_13
; RV32ZVE32F-NEXT: .LBB82_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_14
; RV32ZVE32F-NEXT: .LBB82_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB82_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB82_15
; RV32ZVE32F-NEXT: .LBB82_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB82_16
; RV32ZVE32F-NEXT: .LBB82_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB82_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_2
; RV32ZVE32F-NEXT: .LBB82_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_3
; RV32ZVE32F-NEXT: .LBB82_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_4
; RV32ZVE32F-NEXT: .LBB82_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_5
; RV32ZVE32F-NEXT: .LBB82_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_6
; RV32ZVE32F-NEXT: .LBB82_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB82_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB82_7
; RV32ZVE32F-NEXT: .LBB82_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB82_8
; RV32ZVE32F-NEXT: .LBB82_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -8698,8 +8712,8 @@ define void @mscatter_baseidx_sext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB82_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB82_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -8793,81 +8807,81 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf4 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB83_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_10
; RV32ZVE32F-NEXT: .LBB83_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_11
; RV32ZVE32F-NEXT: .LBB83_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_12
; RV32ZVE32F-NEXT: .LBB83_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_13
; RV32ZVE32F-NEXT: .LBB83_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_14
; RV32ZVE32F-NEXT: .LBB83_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB83_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB83_15
; RV32ZVE32F-NEXT: .LBB83_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB83_16
; RV32ZVE32F-NEXT: .LBB83_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB83_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_2
; RV32ZVE32F-NEXT: .LBB83_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_3
; RV32ZVE32F-NEXT: .LBB83_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_4
; RV32ZVE32F-NEXT: .LBB83_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_5
; RV32ZVE32F-NEXT: .LBB83_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_6
; RV32ZVE32F-NEXT: .LBB83_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB83_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB83_7
; RV32ZVE32F-NEXT: .LBB83_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB83_8
; RV32ZVE32F-NEXT: .LBB83_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -8902,8 +8916,8 @@ define void @mscatter_baseidx_zext_v8i8_v8f64(<8 x double> %val, ptr %base, <8 x
; RV64ZVE32F-NEXT: .LBB83_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB83_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -9002,81 +9016,81 @@ define void @mscatter_baseidx_v8i16_v8f64(<8 x double> %val, ptr %base, <8 x i16
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB84_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_10
; RV32ZVE32F-NEXT: .LBB84_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_11
; RV32ZVE32F-NEXT: .LBB84_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_12
; RV32ZVE32F-NEXT: .LBB84_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_13
; RV32ZVE32F-NEXT: .LBB84_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_14
; RV32ZVE32F-NEXT: .LBB84_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB84_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB84_15
; RV32ZVE32F-NEXT: .LBB84_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB84_16
; RV32ZVE32F-NEXT: .LBB84_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB84_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_2
; RV32ZVE32F-NEXT: .LBB84_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_3
; RV32ZVE32F-NEXT: .LBB84_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_4
; RV32ZVE32F-NEXT: .LBB84_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_5
; RV32ZVE32F-NEXT: .LBB84_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_6
; RV32ZVE32F-NEXT: .LBB84_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB84_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB84_7
; RV32ZVE32F-NEXT: .LBB84_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB84_8
; RV32ZVE32F-NEXT: .LBB84_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9110,8 +9124,8 @@ define void @mscatter_baseidx_v8i16_v8f64(<8 x double> %val, ptr %base, <8 x i16
; RV64ZVE32F-NEXT: .LBB84_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB84_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -9203,81 +9217,81 @@ define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB85_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_10
; RV32ZVE32F-NEXT: .LBB85_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_11
; RV32ZVE32F-NEXT: .LBB85_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_12
; RV32ZVE32F-NEXT: .LBB85_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_13
; RV32ZVE32F-NEXT: .LBB85_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_14
; RV32ZVE32F-NEXT: .LBB85_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB85_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB85_15
; RV32ZVE32F-NEXT: .LBB85_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB85_16
; RV32ZVE32F-NEXT: .LBB85_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB85_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_2
; RV32ZVE32F-NEXT: .LBB85_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_3
; RV32ZVE32F-NEXT: .LBB85_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_4
; RV32ZVE32F-NEXT: .LBB85_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_5
; RV32ZVE32F-NEXT: .LBB85_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_6
; RV32ZVE32F-NEXT: .LBB85_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB85_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB85_7
; RV32ZVE32F-NEXT: .LBB85_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB85_8
; RV32ZVE32F-NEXT: .LBB85_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9311,8 +9325,8 @@ define void @mscatter_baseidx_sext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV64ZVE32F-NEXT: .LBB85_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB85_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -9406,81 +9420,81 @@ define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vzext.vf2 v10, v8
; RV32ZVE32F-NEXT: vsll.vi v8, v10, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB86_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_10
; RV32ZVE32F-NEXT: .LBB86_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_11
; RV32ZVE32F-NEXT: .LBB86_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_12
; RV32ZVE32F-NEXT: .LBB86_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_13
; RV32ZVE32F-NEXT: .LBB86_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_14
; RV32ZVE32F-NEXT: .LBB86_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB86_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB86_15
; RV32ZVE32F-NEXT: .LBB86_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB86_16
; RV32ZVE32F-NEXT: .LBB86_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB86_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_2
; RV32ZVE32F-NEXT: .LBB86_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_3
; RV32ZVE32F-NEXT: .LBB86_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_4
; RV32ZVE32F-NEXT: .LBB86_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_5
; RV32ZVE32F-NEXT: .LBB86_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_6
; RV32ZVE32F-NEXT: .LBB86_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB86_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB86_7
; RV32ZVE32F-NEXT: .LBB86_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB86_8
; RV32ZVE32F-NEXT: .LBB86_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9518,8 +9532,8 @@ define void @mscatter_baseidx_zext_v8i16_v8f64(<8 x double> %val, ptr %base, <8
; RV64ZVE32F-NEXT: .LBB86_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e16, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: andi a3, a2, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e16, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a3, .LBB86_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -9616,81 +9630,81 @@ define void @mscatter_baseidx_v8i32_v8f64(<8 x double> %val, ptr %base, <8 x i32
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB87_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_10
; RV32ZVE32F-NEXT: .LBB87_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_11
; RV32ZVE32F-NEXT: .LBB87_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_12
; RV32ZVE32F-NEXT: .LBB87_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_13
; RV32ZVE32F-NEXT: .LBB87_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_14
; RV32ZVE32F-NEXT: .LBB87_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB87_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB87_15
; RV32ZVE32F-NEXT: .LBB87_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB87_16
; RV32ZVE32F-NEXT: .LBB87_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB87_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_2
; RV32ZVE32F-NEXT: .LBB87_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_3
; RV32ZVE32F-NEXT: .LBB87_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_4
; RV32ZVE32F-NEXT: .LBB87_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_5
; RV32ZVE32F-NEXT: .LBB87_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_6
; RV32ZVE32F-NEXT: .LBB87_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB87_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB87_7
; RV32ZVE32F-NEXT: .LBB87_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB87_8
; RV32ZVE32F-NEXT: .LBB87_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9724,8 +9738,8 @@ define void @mscatter_baseidx_v8i32_v8f64(<8 x double> %val, ptr %base, <8 x i32
; RV64ZVE32F-NEXT: .LBB87_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB87_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -9815,81 +9829,81 @@ define void @mscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB88_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_10
; RV32ZVE32F-NEXT: .LBB88_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_11
; RV32ZVE32F-NEXT: .LBB88_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_12
; RV32ZVE32F-NEXT: .LBB88_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_13
; RV32ZVE32F-NEXT: .LBB88_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_14
; RV32ZVE32F-NEXT: .LBB88_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB88_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB88_15
; RV32ZVE32F-NEXT: .LBB88_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB88_16
; RV32ZVE32F-NEXT: .LBB88_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB88_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_2
; RV32ZVE32F-NEXT: .LBB88_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_3
; RV32ZVE32F-NEXT: .LBB88_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_4
; RV32ZVE32F-NEXT: .LBB88_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_5
; RV32ZVE32F-NEXT: .LBB88_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_6
; RV32ZVE32F-NEXT: .LBB88_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB88_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB88_7
; RV32ZVE32F-NEXT: .LBB88_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB88_8
; RV32ZVE32F-NEXT: .LBB88_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -9923,8 +9937,8 @@ define void @mscatter_baseidx_sext_v8i32_v8f64(<8 x double> %val, ptr %base, <8
; RV64ZVE32F-NEXT: .LBB88_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB88_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -10015,81 +10029,81 @@ define void @mscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, ptr %base, <8
; RV32ZVE32F: # %bb.0:
; RV32ZVE32F-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB89_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_10
; RV32ZVE32F-NEXT: .LBB89_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_11
; RV32ZVE32F-NEXT: .LBB89_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_12
; RV32ZVE32F-NEXT: .LBB89_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_13
; RV32ZVE32F-NEXT: .LBB89_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_14
; RV32ZVE32F-NEXT: .LBB89_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB89_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB89_15
; RV32ZVE32F-NEXT: .LBB89_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB89_16
; RV32ZVE32F-NEXT: .LBB89_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB89_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_2
; RV32ZVE32F-NEXT: .LBB89_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_3
; RV32ZVE32F-NEXT: .LBB89_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_4
; RV32ZVE32F-NEXT: .LBB89_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_5
; RV32ZVE32F-NEXT: .LBB89_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_6
; RV32ZVE32F-NEXT: .LBB89_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB89_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB89_7
; RV32ZVE32F-NEXT: .LBB89_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB89_8
; RV32ZVE32F-NEXT: .LBB89_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10125,8 +10139,8 @@ define void @mscatter_baseidx_zext_v8i32_v8f64(<8 x double> %val, ptr %base, <8
; RV64ZVE32F-NEXT: .LBB89_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e32, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v8, v8, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB89_12
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -10238,81 +10252,81 @@ define void @mscatter_baseidx_v8f64(<8 x double> %val, ptr %base, <8 x i64> %idx
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a3
; RV32ZVE32F-NEXT: vslide1down.vx v8, v8, a2
; RV32ZVE32F-NEXT: vsll.vi v8, v8, 3
-; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
; RV32ZVE32F-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a0, v0
-; RV32ZVE32F-NEXT: andi a1, a0, 1
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_9
+; RV32ZVE32F-NEXT: vmv.x.s a1, v0
+; RV32ZVE32F-NEXT: andi a2, a1, 1
+; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
+; RV32ZVE32F-NEXT: vadd.vx v8, v8, a0
+; RV32ZVE32F-NEXT: bnez a2, .LBB90_9
; RV32ZVE32F-NEXT: # %bb.1: # %else
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_10
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_10
; RV32ZVE32F-NEXT: .LBB90_2: # %else2
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_11
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_11
; RV32ZVE32F-NEXT: .LBB90_3: # %else4
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_12
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_12
; RV32ZVE32F-NEXT: .LBB90_4: # %else6
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_13
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_13
; RV32ZVE32F-NEXT: .LBB90_5: # %else8
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_14
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_14
; RV32ZVE32F-NEXT: .LBB90_6: # %else10
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: bnez a1, .LBB90_15
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: bnez a0, .LBB90_15
; RV32ZVE32F-NEXT: .LBB90_7: # %else12
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: bnez a0, .LBB90_16
; RV32ZVE32F-NEXT: .LBB90_8: # %else14
; RV32ZVE32F-NEXT: ret
; RV32ZVE32F-NEXT: .LBB90_9: # %cond.store
-; RV32ZVE32F-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; RV32ZVE32F-NEXT: vmv.x.s a1, v8
-; RV32ZVE32F-NEXT: fsd fa0, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 2
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_2
+; RV32ZVE32F-NEXT: vmv.x.s a0, v8
+; RV32ZVE32F-NEXT: fsd fa0, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 2
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_2
; RV32ZVE32F-NEXT: .LBB90_10: # %cond.store1
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 1
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa1, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 4
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_3
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa1, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 4
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_3
; RV32ZVE32F-NEXT: .LBB90_11: # %cond.store3
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 2
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa2, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 8
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_4
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa2, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 8
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_4
; RV32ZVE32F-NEXT: .LBB90_12: # %cond.store5
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 3
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa3, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 16
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_5
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa3, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 16
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_5
; RV32ZVE32F-NEXT: .LBB90_13: # %cond.store7
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 4
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa4, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 32
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_6
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa4, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 32
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_6
; RV32ZVE32F-NEXT: .LBB90_14: # %cond.store9
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 5
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa5, 0(a1)
-; RV32ZVE32F-NEXT: andi a1, a0, 64
-; RV32ZVE32F-NEXT: beqz a1, .LBB90_7
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa5, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, 64
+; RV32ZVE32F-NEXT: beqz a0, .LBB90_7
; RV32ZVE32F-NEXT: .LBB90_15: # %cond.store11
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
; RV32ZVE32F-NEXT: vslidedown.vi v10, v8, 6
-; RV32ZVE32F-NEXT: vmv.x.s a1, v10
-; RV32ZVE32F-NEXT: fsd fa6, 0(a1)
-; RV32ZVE32F-NEXT: andi a0, a0, -128
+; RV32ZVE32F-NEXT: vmv.x.s a0, v10
+; RV32ZVE32F-NEXT: fsd fa6, 0(a0)
+; RV32ZVE32F-NEXT: andi a0, a1, -128
; RV32ZVE32F-NEXT: beqz a0, .LBB90_8
; RV32ZVE32F-NEXT: .LBB90_16: # %cond.store13
; RV32ZVE32F-NEXT: vsetivli zero, 1, e32, m2, ta, ma
@@ -10454,8 +10468,8 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB91_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB91_25
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -10471,8 +10485,8 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 4
; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB91_8: # %else8
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB91_10
; RV64ZVE32F-NEXT: # %bb.9: # %cond.store9
@@ -10484,8 +10498,8 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 5
; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB91_10: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB91_27
; RV64ZVE32F-NEXT: # %bb.11: # %else12
@@ -10508,8 +10522,8 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB91_15: # %else18
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v9, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 1024
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v9, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB91_30
; RV64ZVE32F-NEXT: # %bb.16: # %else20
@@ -10530,8 +10544,8 @@ define void @mscatter_baseidx_v16i8(<16 x i8> %val, ptr %base, <16 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v9, v8, 13
; RV64ZVE32F-NEXT: vse8.v v9, (a2)
; RV64ZVE32F-NEXT: .LBB91_20: # %else26
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v9, v10, 2
; RV64ZVE32F-NEXT: bgez a2, .LBB91_22
; RV64ZVE32F-NEXT: # %bb.21: # %cond.store27
@@ -10653,11 +10667,11 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 16
; RV64-NEXT: vslidedown.vi v10, v10, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf8 v16, v10
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
-; RV64-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf8 v16, v10
+; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, ma
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
;
@@ -10686,8 +10700,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB92_4: # %else2
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 4
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB92_49
; RV64ZVE32F-NEXT: # %bb.5: # %else4
@@ -10703,8 +10717,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 4
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_8: # %else8
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 32
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 8
; RV64ZVE32F-NEXT: beqz a2, .LBB92_10
; RV64ZVE32F-NEXT: # %bb.9: # %cond.store9
@@ -10716,8 +10730,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v14, v8, 5
; RV64ZVE32F-NEXT: vse8.v v14, (a2)
; RV64ZVE32F-NEXT: .LBB92_10: # %else10
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 64
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v13, 2
; RV64ZVE32F-NEXT: bnez a2, .LBB92_51
; RV64ZVE32F-NEXT: # %bb.11: # %else12
@@ -10740,8 +10754,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB92_15: # %else18
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v13, v12, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: andi a2, a1, 1024
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v12, 2
; RV64ZVE32F-NEXT: beqz a2, .LBB92_17
; RV64ZVE32F-NEXT: # %bb.16: # %cond.store19
@@ -10762,8 +10776,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v12, v8, 11
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_19: # %else22
-; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 51
+; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 16
; RV64ZVE32F-NEXT: bgez a2, .LBB92_21
; RV64ZVE32F-NEXT: # %bb.20: # %cond.store23
@@ -10784,8 +10798,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vslidedown.vi v11, v8, 13
; RV64ZVE32F-NEXT: vse8.v v11, (a2)
; RV64ZVE32F-NEXT: .LBB92_23: # %else26
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 49
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v13, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB92_54
; RV64ZVE32F-NEXT: # %bb.24: # %else28
@@ -10809,8 +10823,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB92_28: # %else34
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 45
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v12, v10, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB92_57
; RV64ZVE32F-NEXT: # %bb.29: # %else36
@@ -10827,8 +10841,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_32: # %else40
-; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 42
+; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 8
; RV64ZVE32F-NEXT: bgez a2, .LBB92_34
; RV64ZVE32F-NEXT: # %bb.33: # %cond.store41
@@ -10841,8 +10855,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_34: # %else42
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 41
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v11, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB92_59
; RV64ZVE32F-NEXT: # %bb.35: # %else44
@@ -10866,8 +10880,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: .LBB92_39: # %else50
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v11, v10, 4
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 37
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v10, 2
; RV64ZVE32F-NEXT: bltz a2, .LBB92_62
; RV64ZVE32F-NEXT: # %bb.40: # %else52
@@ -10889,8 +10903,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
; RV64ZVE32F-NEXT: vsetivli zero, 1, e8, m1, ta, ma
; RV64ZVE32F-NEXT: vse8.v v12, (a2)
; RV64ZVE32F-NEXT: .LBB92_44: # %else58
-; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: slli a2, a1, 33
+; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32F-NEXT: vslidedown.vi v10, v11, 2
; RV64ZVE32F-NEXT: bgez a2, .LBB92_46
; RV64ZVE32F-NEXT: # %bb.45: # %cond.store59
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
index b3011d0f01ca..86c28247e97e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-store-int.ll
@@ -401,54 +401,41 @@ define void @masked_store_v32i64(ptr %val_ptr, ptr %a, ptr %m_ptr) nounwind {
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -16
; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: li a4, 18
-; RV32-NEXT: mul a3, a3, a4
+; RV32-NEXT: slli a3, a3, 4
; RV32-NEXT: sub sp, sp, a3
; RV32-NEXT: addi a3, a2, 128
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vle64.v v24, (a2)
; RV32-NEXT: vle64.v v8, (a3)
-; RV32-NEXT: csrr a3, vlenb
-; RV32-NEXT: slli a4, a3, 3
-; RV32-NEXT: add a3, a4, a3
-; RV32-NEXT: add a3, sp, a3
-; RV32-NEXT: addi a3, a3, 16
-; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill
-; RV32-NEXT: vle64.v v0, (a2)
-; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
-; RV32-NEXT: vmv.v.i v24, 0
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV32-NEXT: vmseq.vv v8, v0, v24
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 16
-; RV32-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.i v8, 0
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV32-NEXT: vmseq.vv v7, v24, v8
; RV32-NEXT: addi a2, a0, 128
-; RV32-NEXT: vle64.v v8, (a2)
+; RV32-NEXT: vle64.v v24, (a2)
; RV32-NEXT: vle64.v v16, (a0)
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a2, a0, 3
-; RV32-NEXT: add a0, a2, a0
+; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; RV32-NEXT: vmseq.vv v0, v16, v24
+; RV32-NEXT: vmseq.vv v0, v16, v8
; RV32-NEXT: addi a0, a1, 128
-; RV32-NEXT: vse64.v v8, (a0), v0.t
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 3
-; RV32-NEXT: add a0, sp, a0
-; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vse64.v v24, (a0), v0.t
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vse64.v v8, (a1), v0.t
; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 18
-; RV32-NEXT: mul a0, a0, a1
+; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
index 7be015e26b09..93b4f7d2a9c9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-nearbyint-vp.ll
@@ -135,16 +135,16 @@ declare <16 x half> @llvm.vp.nearbyint.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_nearbyint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
@@ -271,8 +271,8 @@ define <8 x float> @vp_nearbyint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -315,8 +315,8 @@ define <16 x float> @vp_nearbyint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zer
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -393,16 +393,16 @@ declare <4 x double> @llvm.vp.nearbyint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_nearbyint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
@@ -437,16 +437,16 @@ declare <8 x double> @llvm.vp.nearbyint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_nearbyint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
@@ -481,16 +481,16 @@ declare <15 x double> @llvm.vp.nearbyint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_nearbyint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -525,16 +525,16 @@ declare <16 x double> @llvm.vp.nearbyint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_nearbyint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -569,17 +569,9 @@ declare <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -588,43 +580,36 @@ define <32 x double> @vp_nearbyint_v32f64(<32 x double> %va, <32 x i1> %m, i32 z
; CHECK-NEXT: .LBB26_2:
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: frflags a1
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: fsflags a1
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v24, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v24, v16, v24, v0.t
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: vmv.v.v v16, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.nearbyint.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
ret <32 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
index 03624113a826..c0bd49cc9c5c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-formation.ll
@@ -163,12 +163,12 @@ define i32 @reduce_sum_16xi32_prefix5(ptr %p) {
; CHECK-NEXT: li a1, -32
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vmv.v.i v8, -1
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vand.vv v8, v10, v12
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vmv.s.x v10, zero
; CHECK-NEXT: vredsum.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
@@ -192,12 +192,12 @@ define i32 @reduce_sum_16xi32_prefix6(ptr %p) {
; CHECK-NEXT: li a1, 192
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vmv.v.i v8, -1
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vand.vv v8, v10, v12
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vmv.s.x v10, zero
; CHECK-NEXT: vredsum.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
@@ -221,10 +221,10 @@ define i32 @reduce_sum_16xi32_prefix7(ptr %p) {
; CHECK-LABEL: reduce_sum_16xi32_prefix7:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, zero
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vredsum.vs v8, v10, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.s.x v10, zero
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vredsum.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -248,9 +248,9 @@ define i32 @reduce_sum_16xi32_prefix8(ptr %p) {
; CHECK-LABEL: reduce_sum_16xi32_prefix8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, zero
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vredsum.vs v8, v10, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.s.x v10, zero
+; CHECK-NEXT: vredsum.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -535,12 +535,12 @@ define i32 @reduce_xor_16xi32_prefix5(ptr %p) {
; CHECK-NEXT: li a1, -32
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vmv.v.i v8, -1
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vand.vv v8, v10, v12
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vmv.s.x v10, zero
; CHECK-NEXT: vredxor.vs v8, v8, v10
; CHECK-NEXT: vmv.x.s a0, v8
@@ -576,17 +576,17 @@ define i32 @reduce_and_16xi32_prefix2(ptr %p) {
define i32 @reduce_and_16xi32_prefix5(ptr %p) {
; CHECK-LABEL: reduce_and_16xi32_prefix5:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, -1
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vredand.vs v8, v10, v10
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vredand.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -623,12 +623,12 @@ define i32 @reduce_or_16xi32_prefix5(ptr %p) {
; CHECK-NEXT: li a1, -32
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vmv.v.i v8, -1
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vand.vv v8, v10, v12
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vredor.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -668,17 +668,17 @@ define i32 @reduce_smax_16xi32_prefix2(ptr %p) {
define i32 @reduce_smax_16xi32_prefix5(ptr %p) {
; CHECK-LABEL: reduce_smax_16xi32_prefix5:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, 524288
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a1
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: lui a0, 524288
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vredmax.vs v8, v10, v10
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vredmax.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -713,17 +713,17 @@ define i32 @reduce_smin_16xi32_prefix5(ptr %p) {
; CHECK-LABEL: reduce_smin_16xi32_prefix5:
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, 524288
-; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a1
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: addi a1, a1, -1
+; CHECK-NEXT: vmv.s.x v10, a1
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vredmin.vs v8, v10, v10
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vredmin.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -760,12 +760,12 @@ define i32 @reduce_umax_16xi32_prefix5(ptr %p) {
; CHECK-NEXT: li a1, -32
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a1
-; CHECK-NEXT: vmv.v.i v8, -1
-; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.v.i v10, -1
+; CHECK-NEXT: vmerge.vim v10, v10, 0, v0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsext.vf4 v12, v8
-; CHECK-NEXT: vand.vv v8, v10, v12
+; CHECK-NEXT: vsext.vf4 v12, v10
+; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vredmaxu.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
@@ -800,17 +800,17 @@ define i32 @reduce_umin_16xi32_prefix2(ptr %p) {
define i32 @reduce_umin_16xi32_prefix5(ptr %p) {
; CHECK-LABEL: reduce_umin_16xi32_prefix5:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
-; CHECK-NEXT: vmv.v.i v8, -1
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vsetivli zero, 8, e32, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v10, -1
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vredminu.vs v8, v10, v10
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vredminu.vs v8, v8, v8
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: ret
%v = load <16 x i32>, ptr %p, align 256
@@ -830,9 +830,9 @@ define float @reduce_fadd_16xf32_prefix2(ptr %p) {
; CHECK-LABEL: reduce_fadd_16xf32_prefix2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, zero
-; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vfredusum.vs v8, v9, v8
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: vmv.s.x v9, zero
+; CHECK-NEXT: vfredusum.vs v8, v8, v9
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %p, align 256
@@ -845,17 +845,17 @@ define float @reduce_fadd_16xf32_prefix2(ptr %p) {
define float @reduce_fadd_16xi32_prefix5(ptr %p) {
; CHECK-LABEL: reduce_fadd_16xi32_prefix5:
; CHECK: # %bb.0:
-; CHECK-NEXT: lui a1, 524288
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vmv.s.x v8, a1
-; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: vle32.v v8, (a0)
+; CHECK-NEXT: lui a0, 524288
+; CHECK-NEXT: vmv.s.x v10, a0
; CHECK-NEXT: vsetivli zero, 6, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 5
+; CHECK-NEXT: vslideup.vi v8, v10, 5
; CHECK-NEXT: vsetivli zero, 7, e32, m2, tu, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 6
+; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
-; CHECK-NEXT: vslideup.vi v10, v8, 7
-; CHECK-NEXT: vfredusum.vs v8, v10, v8
+; CHECK-NEXT: vslideup.vi v8, v10, 7
+; CHECK-NEXT: vfredusum.vs v8, v8, v10
; CHECK-NEXT: vfmv.f.s fa0, v8
; CHECK-NEXT: ret
%v = load <16 x float>, ptr %p, align 256
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
index 9df160bf30f0..7adaaa05f9dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp-vp.ll
@@ -177,8 +177,8 @@ declare float @llvm.vp.reduce.fadd.v64f32(float, <64 x float>, <64 x i1>, i32)
define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_fadd_v64f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB8_2
@@ -193,8 +193,8 @@ define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfredusum.vs v25, v16, v25, v0.t
; CHECK-NEXT: vfmv.f.s fa0, v25
; CHECK-NEXT: ret
@@ -205,8 +205,8 @@ define float @vpreduce_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32
define float @vpreduce_ord_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_ord_fadd_v64f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a2, 32
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB9_2
@@ -221,8 +221,8 @@ define float @vpreduce_ord_fadd_v64f32(float %s, <64 x float> %v, <64 x i1> %m,
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfredosum.vs v25, v16, v25, v0.t
; CHECK-NEXT: vfmv.f.s fa0, v25
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index 7dcfb247d37c..a6763fa22822 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -1853,9 +1853,9 @@ define float @vreduce_fminimum_v128f32(ptr %x) {
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: addi a2, a0, 128
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: addi a1, a0, 128
-; CHECK-NEXT: vle32.v v16, (a1)
+; CHECK-NEXT: vle32.v v16, (a2)
; CHECK-NEXT: addi a1, a0, 384
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: addi a1, a0, 256
@@ -2188,8 +2188,8 @@ define double @vreduce_fminimum_v64f64(ptr %x) {
; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: addi a1, a0, 128
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: addi a1, a0, 384
; CHECK-NEXT: vle64.v v8, (a1)
@@ -2286,9 +2286,9 @@ define double @vreduce_fminimum_v64f64_nonans(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: addi a1, a0, 384
-; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: addi a1, a0, 256
+; CHECK-NEXT: addi a2, a0, 384
+; CHECK-NEXT: vle64.v v16, (a2)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vle64.v v0, (a1)
@@ -2563,9 +2563,9 @@ define float @vreduce_fmaximum_v128f32(ptr %x) {
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; CHECK-NEXT: li a1, 32
+; CHECK-NEXT: addi a2, a0, 128
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
-; CHECK-NEXT: addi a1, a0, 128
-; CHECK-NEXT: vle32.v v16, (a1)
+; CHECK-NEXT: vle32.v v16, (a2)
; CHECK-NEXT: addi a1, a0, 384
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: addi a1, a0, 256
@@ -2898,8 +2898,8 @@ define double @vreduce_fmaximum_v64f64(ptr %x) {
; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: sub sp, sp, a1
; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: addi a1, a0, 128
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: addi a1, a0, 384
; CHECK-NEXT: vle64.v v8, (a1)
@@ -2996,9 +2996,9 @@ define double @vreduce_fmaximum_v64f64_nonans(ptr %x) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: addi a1, a0, 384
-; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: addi a1, a0, 256
+; CHECK-NEXT: addi a2, a0, 384
+; CHECK-NEXT: vle64.v v16, (a2)
; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: vle64.v v0, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
index 02a989a96996..016f95bfef7e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int-vp.ll
@@ -801,8 +801,8 @@ declare i32 @llvm.vp.reduce.xor.v64i32(i32, <64 x i32>, <64 x i1>, i32)
define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpreduce_xor_v64i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a3, 32
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: bltu a1, a3, .LBB49_2
@@ -817,8 +817,8 @@ define signext i32 @vpreduce_xor_v64i32(i32 signext %s, <64 x i32> %v, <64 x i1>
; CHECK-NEXT: sltu a1, a1, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vredxor.vs v25, v16, v25, v0.t
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: ret
@@ -1750,9 +1750,9 @@ define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m,
; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: li a3, 32
-; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: lui a2, %hi(.LCPI72_0)
; RV32-NEXT: addi a2, a2, %lo(.LCPI72_0)
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: vle8.v v12, (a2)
; RV32-NEXT: mv a2, a0
; RV32-NEXT: vid.v v16
@@ -1794,9 +1794,9 @@ define signext i8 @vpreduce_mul_v64i8(i8 signext %s, <64 x i8> %v, <64 x i1> %m,
; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: li a3, 32
-; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: lui a2, %hi(.LCPI72_0)
; RV64-NEXT: addi a2, a2, %lo(.LCPI72_0)
+; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vle8.v v12, (a2)
; RV64-NEXT: mv a2, a0
; RV64-NEXT: vid.v v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index 6c75c9b9c294..28ce6a12c4c8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -1540,22 +1540,21 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
; RV32-NEXT: vslidedown.vi v24, v8, 16
; RV32-NEXT: vslidedown.vi v0, v16, 16
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: vmv4r.v v8, v0
-; RV32-NEXT: vwadd.vv v0, v24, v8
+; RV32-NEXT: vwadd.vv v8, v24, v0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vwadd.vv v0, v8, v16
-; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v0, v8
; RV32-NEXT: vmv.s.x v16, zero
; RV32-NEXT: vredsum.vs v8, v8, v16
@@ -1588,22 +1587,21 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
; RV64-NEXT: vslidedown.vi v24, v8, 16
; RV64-NEXT: vslidedown.vi v0, v16, 16
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT: vmv4r.v v8, v0
-; RV64-NEXT: vwadd.vv v0, v24, v8
+; RV64-NEXT: vwadd.vv v8, v24, v0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vwadd.vv v0, v8, v16
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vadd.vv v8, v0, v8
; RV64-NEXT: vmv.s.x v16, zero
; RV64-NEXT: vredsum.vs v8, v8, v16
@@ -1639,22 +1637,21 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
; RV32-NEXT: vslidedown.vi v24, v8, 16
; RV32-NEXT: vslidedown.vi v0, v16, 16
; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT: vmv4r.v v8, v0
-; RV32-NEXT: vwaddu.vv v0, v24, v8
+; RV32-NEXT: vwaddu.vv v8, v24, v0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
-; RV32-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vwaddu.vv v0, v8, v16
-; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 16
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v0, v8
; RV32-NEXT: vmv.s.x v16, zero
; RV32-NEXT: vredsum.vs v8, v8, v16
@@ -1687,22 +1684,21 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
; RV64-NEXT: vslidedown.vi v24, v8, 16
; RV64-NEXT: vslidedown.vi v0, v16, 16
; RV64-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT: vmv4r.v v8, v0
-; RV64-NEXT: vwaddu.vv v0, v24, v8
+; RV64-NEXT: vwaddu.vv v8, v24, v0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
-; RV64-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; RV64-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vwaddu.vv v0, v8, v16
-; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, sp, a0
; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vadd.vv v8, v0, v8
; RV64-NEXT: vmv.s.x v16, zero
; RV64-NEXT: vredsum.vs v8, v8, v16
@@ -2286,9 +2282,9 @@ define i64 @vreduce_and_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
@@ -2871,9 +2867,9 @@ define i64 @vreduce_or_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
@@ -4074,9 +4070,9 @@ define i64 @vreduce_smin_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
@@ -4659,9 +4655,9 @@ define i64 @vreduce_smax_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
@@ -5244,9 +5240,9 @@ define i64 @vreduce_umin_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
@@ -5829,9 +5825,9 @@ define i64 @vreduce_umax_v64i64(ptr %x) nounwind {
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a0)
-; RV64-NEXT: addi a1, a0, 384
-; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: addi a1, a0, 256
+; RV64-NEXT: addi a2, a0, 384
+; RV64-NEXT: vle64.v v16, (a2)
; RV64-NEXT: addi a0, a0, 128
; RV64-NEXT: vle64.v v24, (a0)
; RV64-NEXT: vle64.v v0, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
index a1f010f98ab4..dc0f4e743055 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll
@@ -24,8 +24,8 @@ define zeroext i1 @vpreduce_or_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i32
; CHECK-LABEL: vpreduce_or_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -40,8 +40,8 @@ define zeroext i1 @vpreduce_xor_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i3
; CHECK-LABEL: vpreduce_xor_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -72,8 +72,8 @@ define zeroext i1 @vpreduce_or_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i32
; CHECK-LABEL: vpreduce_or_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -88,8 +88,8 @@ define zeroext i1 @vpreduce_xor_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3
; CHECK-LABEL: vpreduce_xor_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -120,8 +120,8 @@ define zeroext i1 @vpreduce_or_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i32
; CHECK-LABEL: vpreduce_or_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -136,8 +136,8 @@ define zeroext i1 @vpreduce_xor_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3
; CHECK-LABEL: vpreduce_xor_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -168,8 +168,8 @@ define zeroext i1 @vpreduce_or_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i32
; CHECK-LABEL: vpreduce_or_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -184,8 +184,8 @@ define zeroext i1 @vpreduce_xor_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3
; CHECK-LABEL: vpreduce_xor_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -264,8 +264,8 @@ define zeroext i1 @vpreduce_or_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-LABEL: vpreduce_or_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -280,8 +280,8 @@ define zeroext i1 @vpreduce_xor_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-LABEL: vpreduce_xor_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -296,8 +296,8 @@ define zeroext i1 @vpreduce_add_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i3
; CHECK-LABEL: vpreduce_add_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -312,8 +312,8 @@ define zeroext i1 @vpreduce_add_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i3
; CHECK-LABEL: vpreduce_add_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -328,8 +328,8 @@ define zeroext i1 @vpreduce_add_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i3
; CHECK-LABEL: vpreduce_add_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -344,8 +344,8 @@ define zeroext i1 @vpreduce_add_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i3
; CHECK-LABEL: vpreduce_add_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -360,8 +360,8 @@ define zeroext i1 @vpreduce_add_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m,
; CHECK-LABEL: vpreduce_add_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -488,8 +488,8 @@ define zeroext i1 @vpreduce_smin_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i
; CHECK-LABEL: vpreduce_smin_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -504,8 +504,8 @@ define zeroext i1 @vpreduce_smin_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i
; CHECK-LABEL: vpreduce_smin_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -520,8 +520,8 @@ define zeroext i1 @vpreduce_smin_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i
; CHECK-LABEL: vpreduce_smin_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -536,8 +536,8 @@ define zeroext i1 @vpreduce_smin_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i
; CHECK-LABEL: vpreduce_smin_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -552,8 +552,8 @@ define zeroext i1 @vpreduce_smin_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m
; CHECK-LABEL: vpreduce_smin_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -568,8 +568,8 @@ define zeroext i1 @vpreduce_smin_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m
; CHECK-LABEL: vpreduce_smin_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -584,8 +584,8 @@ define zeroext i1 @vpreduce_smin_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m
; CHECK-LABEL: vpreduce_smin_v64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -600,8 +600,8 @@ define zeroext i1 @vpreduce_umax_v1i1(i1 zeroext %s, <1 x i1> %v, <1 x i1> %m, i
; CHECK-LABEL: vpreduce_umax_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -616,8 +616,8 @@ define zeroext i1 @vpreduce_umax_v2i1(i1 zeroext %s, <2 x i1> %v, <2 x i1> %m, i
; CHECK-LABEL: vpreduce_umax_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -632,8 +632,8 @@ define zeroext i1 @vpreduce_umax_v4i1(i1 zeroext %s, <4 x i1> %v, <4 x i1> %m, i
; CHECK-LABEL: vpreduce_umax_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -648,8 +648,8 @@ define zeroext i1 @vpreduce_umax_v8i1(i1 zeroext %s, <8 x i1> %v, <8 x i1> %m, i
; CHECK-LABEL: vpreduce_umax_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -664,8 +664,8 @@ define zeroext i1 @vpreduce_umax_v16i1(i1 zeroext %s, <16 x i1> %v, <16 x i1> %m
; CHECK-LABEL: vpreduce_umax_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -680,8 +680,8 @@ define zeroext i1 @vpreduce_umax_v32i1(i1 zeroext %s, <32 x i1> %v, <32 x i1> %m
; CHECK-LABEL: vpreduce_umax_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -696,8 +696,8 @@ define zeroext i1 @vpreduce_umax_v64i1(i1 zeroext %s, <64 x i1> %v, <64 x i1> %m
; CHECK-LABEL: vpreduce_umax_v64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
index 920d0d5fe7ba..1f856d04ca89 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-rint-vp.ll
@@ -123,15 +123,15 @@ declare <16 x half> @llvm.vp.rint.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_rint_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
@@ -246,8 +246,8 @@ define <8 x float> @vp_rint_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %evl
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -286,8 +286,8 @@ define <16 x float> @vp_rint_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -357,15 +357,15 @@ declare <4 x double> @llvm.vp.rint.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_rint_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
@@ -397,15 +397,15 @@ declare <8 x double> @llvm.vp.rint.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_rint_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
@@ -437,15 +437,15 @@ declare <15 x double> @llvm.vp.rint.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_rint_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -477,15 +477,15 @@ declare <16 x double> @llvm.vp.rint.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_rint_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -517,65 +517,54 @@ declare <32 x double> @llvm.vp.rint.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_rint_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -597,17 +586,20 @@ define <32 x double> @vp_rint_v32f64_unmasked(<32 x double> %va, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8
; CHECK-NEXT: vmflt.vf v0, v24, fa5
+; CHECK-NEXT: addi a2, a0, -16
+; CHECK-NEXT: sltu a0, a0, a2
+; CHECK-NEXT: addi a0, a0, -1
+; CHECK-NEXT: and a0, a0, a2
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfabs.v v24, v16
+; CHECK-NEXT: vmflt.vf v7, v24, fa5
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
-; CHECK-NEXT: addi a1, a0, -16
-; CHECK-NEXT: sltu a0, a0, a1
-; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vfabs.v v24, v16
-; CHECK-NEXT: vmflt.vf v0, v24, fa5
; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
index 716cf7b0f46f..0f587232680d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-round-vp.ll
@@ -204,8 +204,8 @@ define <8 x half> @vp_round_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %evl)
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <16 x half> @llvm.vp.round.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <16 x half> @vp_round_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -439,8 +439,8 @@ define <8 x float> @vp_round_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext %ev
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -483,8 +483,8 @@ define <16 x float> @vp_round_v16f32(<16 x float> %va, <16 x i1> %m, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -561,16 +561,16 @@ declare <4 x double> @llvm.vp.round.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_round_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -605,16 +605,16 @@ declare <8 x double> @llvm.vp.round.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_round_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -649,16 +649,16 @@ declare <15 x double> @llvm.vp.round.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_round_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -693,16 +693,16 @@ declare <16 x double> @llvm.vp.round.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_round_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -737,69 +737,59 @@ declare <32 x double> @llvm.vp.round.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_round_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a1, 4
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
index 603f9397dc90..0fb7e6a7de56 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundeven-vp.ll
@@ -204,8 +204,8 @@ define <8 x half> @vp_roundeven_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext %
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <16 x half> @llvm.vp.roundeven.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <16 x half> @vp_roundeven_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroe
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -439,8 +439,8 @@ define <8 x float> @vp_roundeven_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroext
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -483,8 +483,8 @@ define <16 x float> @vp_roundeven_v16f32(<16 x float> %va, <16 x i1> %m, i32 zer
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -561,16 +561,16 @@ declare <4 x double> @llvm.vp.roundeven.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundeven_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -605,16 +605,16 @@ declare <8 x double> @llvm.vp.roundeven.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundeven_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -649,16 +649,16 @@ declare <15 x double> @llvm.vp.roundeven.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundeven_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -693,16 +693,16 @@ declare <16 x double> @llvm.vp.roundeven.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundeven_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -737,69 +737,59 @@ declare <32 x double> @llvm.vp.roundeven.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_roundeven_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a1, 0
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
index a5adfc36887a..927f96b64422 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-roundtozero-vp.ll
@@ -204,8 +204,8 @@ define <8 x half> @vp_roundtozero_v8f16(<8 x half> %va, <8 x i1> %m, i32 zeroext
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <16 x half> @llvm.vp.roundtozero.v16f16(<16 x half>, <16 x i1>, i32)
define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_v16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <16 x half> @vp_roundtozero_v16f16(<16 x half> %va, <16 x i1> %m, i32 zer
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -439,8 +439,8 @@ define <8 x float> @vp_roundtozero_v8f32(<8 x float> %va, <8 x i1> %m, i32 zeroe
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -483,8 +483,8 @@ define <16 x float> @vp_roundtozero_v16f32(<16 x float> %va, <16 x i1> %m, i32 z
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -561,16 +561,16 @@ declare <4 x double> @llvm.vp.roundtozero.v4f64(<4 x double>, <4 x i1>, i32)
define <4 x double> @vp_roundtozero_v4f64(<4 x double> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI18_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI18_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -605,16 +605,16 @@ declare <8 x double> @llvm.vp.roundtozero.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x double> @vp_roundtozero_v8f64(<8 x double> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI20_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI20_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -649,16 +649,16 @@ declare <15 x double> @llvm.vp.roundtozero.v15f64(<15 x double>, <15 x i1>, i32)
define <15 x double> @vp_roundtozero_v15f64(<15 x double> %va, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI22_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI22_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -693,16 +693,16 @@ declare <16 x double> @llvm.vp.roundtozero.v16f64(<16 x double>, <16 x i1>, i32)
define <16 x double> @vp_roundtozero_v16f64(<16 x double> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -737,69 +737,59 @@ declare <32 x double> @llvm.vp.roundtozero.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vp_roundtozero_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v25, v0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vmv1r.v v6, v0
; CHECK-NEXT: li a2, 16
-; CHECK-NEXT: vslidedown.vi v24, v0, 2
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: sub sp, sp, a2
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: lui a2, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a2)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a1, 1
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a1
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
index a4ab67f41595..80561be0ca2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sad.ll
@@ -115,17 +115,17 @@ define signext i32 @sad_2block_16xi8_as_i32(ptr %a, ptr %b, i32 signext %stridea
; CHECK-NEXT: vwaddu.vv v10, v9, v8
; CHECK-NEXT: vminu.vv v8, v12, v13
; CHECK-NEXT: vmaxu.vv v9, v12, v13
-; CHECK-NEXT: vsub.vv v8, v9, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: add a1, a1, a3
-; CHECK-NEXT: vle8.v v9, (a0)
-; CHECK-NEXT: vle8.v v12, (a1)
+; CHECK-NEXT: vle8.v v12, (a0)
+; CHECK-NEXT: vle8.v v13, (a1)
+; CHECK-NEXT: vsub.vv v8, v9, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v14, v8
; CHECK-NEXT: vwaddu.vv v16, v14, v10
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vminu.vv v8, v9, v12
-; CHECK-NEXT: vmaxu.vv v9, v9, v12
+; CHECK-NEXT: vminu.vv v8, v12, v13
+; CHECK-NEXT: vmaxu.vv v9, v12, v13
; CHECK-NEXT: vsub.vv v8, v9, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vzext.vf2 v10, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
index 4598bf67a236..33e9cde4c30a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-fp-vp.ll
@@ -1163,31 +1163,31 @@ define <128 x i1> @fcmp_oeq_vv_v128f16(<128 x half> %va, <128 x half> %vb, <128
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFH-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; ZVFH-NEXT: vslidedown.vi v7, v0, 8
+; ZVFH-NEXT: vslidedown.vi v6, v0, 8
; ZVFH-NEXT: mv a0, a2
; ZVFH-NEXT: bltu a2, a3, .LBB43_2
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: li a0, 64
; ZVFH-NEXT: .LBB43_2:
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT: addi a0, sp, 16
-; ZVFH-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFH-NEXT: vmfeq.vv v6, v8, v24, v0.t
+; ZVFH-NEXT: vmfeq.vv v7, v8, v24, v0.t
; ZVFH-NEXT: addi a0, a2, -64
; ZVFH-NEXT: sltu a1, a2, a0
; ZVFH-NEXT: addi a1, a1, -1
; ZVFH-NEXT: and a0, a1, a0
+; ZVFH-NEXT: vmv1r.v v0, v6
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: add a1, sp, a1
+; ZVFH-NEXT: addi a1, a1, 16
+; ZVFH-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
-; ZVFH-NEXT: vmv1r.v v0, v7
-; ZVFH-NEXT: csrr a0, vlenb
-; ZVFH-NEXT: slli a0, a0, 3
-; ZVFH-NEXT: add a0, sp, a0
-; ZVFH-NEXT: addi a0, a0, 16
-; ZVFH-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFH-NEXT: vmfeq.vv v24, v16, v8, v0.t
+; ZVFH-NEXT: vmfeq.vv v8, v16, v24, v0.t
; ZVFH-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; ZVFH-NEXT: vslideup.vi v6, v24, 8
-; ZVFH-NEXT: vmv.v.v v0, v6
+; ZVFH-NEXT: vslideup.vi v7, v8, 8
+; ZVFH-NEXT: vmv.v.v v0, v7
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 4
; ZVFH-NEXT: add sp, sp, a0
@@ -2865,37 +2865,36 @@ define <32 x i1> @fcmp_oeq_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32 x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v6, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB87_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB87_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v6, v8, v24, v0.t
+; CHECK-NEXT: vmfeq.vv v7, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v24, v16, v8, v0.t
+; CHECK-NEXT: vmfeq.vv v8, v16, v24, v0.t
; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
-; CHECK-NEXT: vslideup.vi v6, v24, 2
-; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: vslideup.vi v7, v8, 2
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
index 21bbca00921d..5f3847e08505 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp.ll
@@ -611,10 +611,10 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: vle8.v v8, (a2)
; CHECK-NEXT: addi a2, a3, -128
; CHECK-NEXT: sltu a4, a3, a2
-; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a2, a4, a2
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vv v6, v16, v8, v0.t
@@ -622,7 +622,6 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: .LBB51_2:
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -631,6 +630,7 @@ define <256 x i1> @icmp_eq_vv_v256i8(<256 x i8> %va, <256 x i8> %vb, <256 x i1>
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v6
@@ -660,8 +660,8 @@ define <256 x i1> @icmp_eq_vx_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 z
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB52_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v25
@@ -689,8 +689,8 @@ define <256 x i1> @icmp_eq_vx_swap_v256i8(<256 x i8> %va, i8 %b, <256 x i1> %m,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB53_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v25
@@ -1264,31 +1264,31 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 4
+; CHECK-NEXT: vslidedown.vi v6, v0, 4
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a3, .LBB99_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: .LBB99_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmseq.vv v6, v8, v24, v0.t
+; CHECK-NEXT: vmseq.vv v7, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -32
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmseq.vv v24, v16, v8, v0.t
+; CHECK-NEXT: vmseq.vv v8, v16, v24, v0.t
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v6, v24, 4
-; CHECK-NEXT: vmv1r.v v0, v6
+; CHECK-NEXT: vslideup.vi v7, v8, 4
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
@@ -1301,8 +1301,8 @@ define <64 x i1> @icmp_eq_vv_v64i32(<64 x i32> %va, <64 x i32> %vb, <64 x i1> %m
define <64 x i1> @icmp_eq_vx_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_v64i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a3, 32
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: bltu a1, a3, .LBB100_2
@@ -1315,8 +1315,8 @@ define <64 x i1> @icmp_eq_vx_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i32 ze
; CHECK-NEXT: sltu a1, a1, a2
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v8, v16, a0, v0.t
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v25, v8, 4
@@ -1331,8 +1331,8 @@ define <64 x i1> @icmp_eq_vx_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i32 ze
define <64 x i1> @icmp_eq_vx_swap_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: icmp_eq_vx_swap_v64i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: li a3, 32
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 4
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: bltu a1, a3, .LBB101_2
@@ -1345,8 +1345,8 @@ define <64 x i1> @icmp_eq_vx_swap_v64i32(<64 x i32> %va, i32 %b, <64 x i1> %m, i
; CHECK-NEXT: sltu a1, a1, a2
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v8, v16, a0, v0.t
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v25, v8, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
index 52596d889241..d1980ee3b0a6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sext-vp.ll
@@ -151,8 +151,8 @@ declare <32 x i64> @llvm.vp.sext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32)
define <32 x i64> @vsext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsext_v32i64_v32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB12_2
@@ -167,8 +167,8 @@ define <32 x i64> @vsext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsext.vf2 v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
index 609b4e982489..925366e8b1d5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-concat.ll
@@ -33,8 +33,8 @@ define <8 x i32> @concat_4xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
; VLS-LABEL: concat_4xv2i32:
; VLS: # %bb.0:
; VLS-NEXT: vmv1r.v v13, v10
-; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT: vmv1r.v v12, v8
+; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT: vslideup.vi v13, v11, 2
; VLS-NEXT: vslideup.vi v12, v9, 2
; VLS-NEXT: vmv2r.v v8, v12
@@ -147,8 +147,8 @@ define <16 x i32> @concat_8xv2i32(<2 x i32> %a, <2 x i32> %b, <2 x i32> %c, <2 x
; VLS-NEXT: vmv1r.v v19, v14
; VLS-NEXT: vmv1r.v v18, v12
; VLS-NEXT: vmv1r.v v17, v10
-; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT: vmv1r.v v16, v8
+; VLS-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; VLS-NEXT: vslideup.vi v19, v15, 2
; VLS-NEXT: vslideup.vi v18, v13, 2
; VLS-NEXT: vslideup.vi v17, v11, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
index 8499086994bc..d461fa8378cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-exact-vlen.ll
@@ -164,11 +164,10 @@ define <4 x i64> @m2_splat_into_slide_two_source_v2_lo(<4 x i64> %v1, <4 x i64>
define <4 x i64> @m2_splat_into_slide_two_source(<4 x i64> %v1, <4 x i64> %v2) vscale_range(2,2) {
; CHECK-LABEL: m2_splat_into_slide_two_source:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-NEXT: vrgather.vi v12, v8, 0
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v0, 12
; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT: vrgather.vi v12, v8, 0
; CHECK-NEXT: vslideup.vi v12, v10, 1, v0.t
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
index 47d7baade8b4..d70ed2fb0e26 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-reverse.ll
@@ -101,10 +101,10 @@ define <32 x i1> @reverse_v32i1(<32 x i1> %a) {
; NO-ZVBB-LABEL: reverse_v32i1:
; NO-ZVBB: # %bb.0:
; NO-ZVBB-NEXT: li a0, 32
+; NO-ZVBB-NEXT: lui a1, %hi(.LCPI4_0)
+; NO-ZVBB-NEXT: addi a1, a1, %lo(.LCPI4_0)
; NO-ZVBB-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; NO-ZVBB-NEXT: lui a0, %hi(.LCPI4_0)
-; NO-ZVBB-NEXT: addi a0, a0, %lo(.LCPI4_0)
-; NO-ZVBB-NEXT: vle8.v v8, (a0)
+; NO-ZVBB-NEXT: vle8.v v8, (a1)
; NO-ZVBB-NEXT: vmv.v.i v10, 0
; NO-ZVBB-NEXT: vmerge.vim v10, v10, 1, v0
; NO-ZVBB-NEXT: vrgather.vv v12, v10, v8
@@ -124,10 +124,10 @@ define <64 x i1> @reverse_v64i1(<64 x i1> %a) {
; NO-ZVBB-LABEL: reverse_v64i1:
; NO-ZVBB: # %bb.0:
; NO-ZVBB-NEXT: li a0, 64
+; NO-ZVBB-NEXT: lui a1, %hi(.LCPI5_0)
+; NO-ZVBB-NEXT: addi a1, a1, %lo(.LCPI5_0)
; NO-ZVBB-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; NO-ZVBB-NEXT: lui a0, %hi(.LCPI5_0)
-; NO-ZVBB-NEXT: addi a0, a0, %lo(.LCPI5_0)
-; NO-ZVBB-NEXT: vle8.v v8, (a0)
+; NO-ZVBB-NEXT: vle8.v v8, (a1)
; NO-ZVBB-NEXT: vmv.v.i v12, 0
; NO-ZVBB-NEXT: vmerge.vim v12, v12, 1, v0
; NO-ZVBB-NEXT: vrgather.vv v16, v12, v8
@@ -147,10 +147,10 @@ define <128 x i1> @reverse_v128i1(<128 x i1> %a) {
; CHECK-LABEL: reverse_v128i1:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 128
+; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
+; CHECK-NEXT: addi a1, a1, %lo(.LCPI6_0)
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: lui a0, %hi(.LCPI6_0)
-; CHECK-NEXT: addi a0, a0, %lo(.LCPI6_0)
-; CHECK-NEXT: vle8.v v8, (a0)
+; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmv.v.i v16, 0
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
; CHECK-NEXT: vrgather.vv v24, v16, v8
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll
index 038fead011d8..82c57a9d90a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-transpose.ll
@@ -19,8 +19,8 @@ define <8 x i8> @trn1.v8i8(<8 x i8> %v0, <8 x i8> %v1) {
define <8 x i8> @trn2.v8i8(<8 x i8> %v0, <8 x i8> %v1) {
; CHECK-LABEL: trn2.v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: li a0, 170
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -46,13 +46,12 @@ define <16 x i8> @trn1.v16i8(<16 x i8> %v0, <16 x i8> %v1) {
define <16 x i8> @trn2.v16i8(<16 x i8> %v0, <16 x i8> %v1) {
; CHECK-LABEL: trn2.v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: lui a0, 11
; CHECK-NEXT: addi a0, a0, -1366
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
; CHECK-NEXT: ret
%tmp0 = shufflevector <16 x i8> %v0, <16 x i8> %v1, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
@@ -62,9 +61,8 @@ define <16 x i8> @trn2.v16i8(<16 x i8> %v0, <16 x i8> %v1) {
define <4 x i16> @trn1.v4i16(<4 x i16> %v0, <4 x i16> %v1) {
; CHECK-LABEL: trn1.v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vslideup.vi v8, v9, 1, v0.t
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x i16> %v0, <4 x i16> %v1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -98,8 +96,8 @@ define <8 x i16> @trn1.v8i16(<8 x i16> %v0, <8 x i16> %v1) {
define <8 x i16> @trn2.v8i16(<8 x i16> %v0, <8 x i16> %v1) {
; CHECK-LABEL: trn2.v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: li a0, 170
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
@@ -133,9 +131,8 @@ define <2 x i32> @trn2.v2i32(<2 x i32> %v0, <2 x i32> %v1) {
define <4 x i32> @trn1.v4i32(<4 x i32> %v0, <4 x i32> %v1) {
; CHECK-LABEL: trn1.v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vslideup.vi v8, v9, 1, v0.t
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x i32> %v0, <4 x i32> %v1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -201,9 +198,8 @@ define <2 x float> @trn2.v2f32(<2 x float> %v0, <2 x float> %v1) {
define <4 x float> @trn1.v4f32(<4 x float> %v0, <4 x float> %v1) {
; CHECK-LABEL: trn1.v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vslideup.vi v8, v9, 1, v0.t
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x float> %v0, <4 x float> %v1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -247,9 +243,8 @@ define <2 x double> @trn2.v2f64(<2 x double> %v0, <2 x double> %v1) {
define <4 x half> @trn1.v4f16(<4 x half> %v0, <4 x half> %v1) {
; CHECK-LABEL: trn1.v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vmv.v.i v0, 10
; CHECK-NEXT: vslideup.vi v8, v9, 1, v0.t
; CHECK-NEXT: ret
%tmp0 = shufflevector <4 x half> %v0, <4 x half> %v1, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
@@ -283,8 +278,8 @@ define <8 x half> @trn1.v8f16(<8 x half> %v0, <8 x half> %v1) {
define <8 x half> @trn2.v8f16(<8 x half> %v0, <8 x half> %v1) {
; CHECK-LABEL: trn2.v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: li a0, 170
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vslidedown.vi v8, v8, 1
; CHECK-NEXT: vmerge.vvm v8, v8, v9, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
index 5e93fdfc7a65..bf0eab77d0ac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-sitofp-vp.ll
@@ -390,8 +390,8 @@ declare <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32)
define <32 x double> @vsitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsitofp_v32f64_v32i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB25_2
@@ -404,8 +404,8 @@ define <32 x double> @vsitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 ze
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.sitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
index 0e6b03bf1632..0e1105848440 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-load-combine.ll
@@ -75,9 +75,9 @@ define void @widen_4xv4i16_unaligned(ptr %x, ptr %z) {
; CHECK-NO-MISALIGN: # %bb.0:
; CHECK-NO-MISALIGN-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NO-MISALIGN-NEXT: vle8.v v8, (a0)
-; CHECK-NO-MISALIGN-NEXT: addi a2, a0, 16
-; CHECK-NO-MISALIGN-NEXT: vle8.v v10, (a2)
; CHECK-NO-MISALIGN-NEXT: addi a2, a0, 8
+; CHECK-NO-MISALIGN-NEXT: addi a3, a0, 16
+; CHECK-NO-MISALIGN-NEXT: vle8.v v10, (a3)
; CHECK-NO-MISALIGN-NEXT: addi a0, a0, 24
; CHECK-NO-MISALIGN-NEXT: vle8.v v9, (a0)
; CHECK-NO-MISALIGN-NEXT: vle8.v v11, (a2)
@@ -186,9 +186,9 @@ define void @strided_constant_mismatch_4xv4i16(ptr %x, ptr %z) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: addi a2, a0, 6
-; CHECK-NEXT: vle16.v v10, (a2)
; CHECK-NEXT: addi a2, a0, 2
+; CHECK-NEXT: addi a3, a0, 6
+; CHECK-NEXT: vle16.v v10, (a3)
; CHECK-NEXT: addi a0, a0, 8
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vle16.v v11, (a2)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
index 6a8d2008de74..5e64e9fbc1a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpload.ll
@@ -480,14 +480,14 @@ define <32 x double> @strided_vpload_v32f64(ptr %ptr, i32 signext %stride, <32 x
; CHECK-NEXT: addi a5, a2, -16
; CHECK-NEXT: sltu a2, a2, a5
; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a5
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v9, 2
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: and a2, a2, a5
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vlse64.v v16, (a4), a1, v0.t
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%load = call <32 x double> @llvm.experimental.vp.strided.load.v32f64.p0.i32(ptr %ptr, i32 %stride, <32 x i1> %m, i32 %evl)
@@ -555,13 +555,13 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
; CHECK-RV32-NEXT: li a4, 16
; CHECK-RV32-NEXT: .LBB42_6:
; CHECK-RV32-NEXT: mul a5, a4, a2
-; CHECK-RV32-NEXT: add a5, a1, a5
; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2
+; CHECK-RV32-NEXT: add a5, a1, a5
; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v24, (a5), a2, v0.t
-; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v8
+; CHECK-RV32-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a1), a2, v0.t
; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-RV32-NEXT: vse64.v v8, (a0)
@@ -605,13 +605,13 @@ define <33 x double> @strided_load_v33f64(ptr %ptr, i64 %stride, <33 x i1> %mask
; CHECK-RV64-NEXT: li a3, 16
; CHECK-RV64-NEXT: .LBB42_6:
; CHECK-RV64-NEXT: mul a5, a3, a2
-; CHECK-RV64-NEXT: add a5, a1, a5
; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2
+; CHECK-RV64-NEXT: add a5, a1, a5
; CHECK-RV64-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v24, (a5), a2, v0.t
-; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v8
+; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a1), a2, v0.t
; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-RV64-NEXT: vse64.v v8, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
index dee422a4c17d..35f123f1157f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-strided-vpstore.ll
@@ -420,9 +420,9 @@ define void @strided_store_v32f64(<32 x double> %v, ptr %ptr, i32 signext %strid
; CHECK-NEXT: addi a3, a2, -16
; CHECK-NEXT: sltu a2, a2, a3
; CHECK-NEXT: addi a2, a2, -1
-; CHECK-NEXT: and a2, a2, a3
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: and a2, a2, a3
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vsse64.v v16, (a0), a1, v0.t
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
index 9fa8ab39723f..7513d31b54bd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-trunc-vp.ll
@@ -54,8 +54,8 @@ define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zero
; CHECK-LABEL: vtrunc_v128i7_v128i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: li a1, 64
+; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v12, v0, 8
; CHECK-NEXT: mv a2, a0
; CHECK-NEXT: bltu a0, a1, .LBB4_2
@@ -68,8 +68,8 @@ define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zero
; CHECK-NEXT: sltu a0, a0, a2
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
; CHECK-NEXT: li a0, 128
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
@@ -243,75 +243,67 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 8, e8, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v5, v0, 8
-; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v26, v0, 4
+; CHECK-NEXT: vslidedown.vi v25, v0, 8
; CHECK-NEXT: addi a2, a1, 512
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v8, (a2)
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: li a3, 48
-; CHECK-NEXT: mul a2, a2, a3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v27, v5, 4
-; CHECK-NEXT: addi a2, a1, 640
+; CHECK-NEXT: vslidedown.vi v27, v25, 4
+; CHECK-NEXT: addi a3, a1, 640
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v16, (a2)
+; CHECK-NEXT: vle64.v v8, (a3)
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: addi a2, a7, -64
-; CHECK-NEXT: sltu a3, a7, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a4, a3, a2
-; CHECK-NEXT: addi a2, a4, -32
-; CHECK-NEXT: sltu a3, a4, a2
-; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a2
-; CHECK-NEXT: addi a2, a3, -16
-; CHECK-NEXT: sltu a5, a3, a2
-; CHECK-NEXT: addi a5, a5, -1
-; CHECK-NEXT: and a2, a5, a2
; CHECK-NEXT: vslidedown.vi v0, v27, 2
-; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
-; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: li a5, 24
-; CHECK-NEXT: mul a2, a2, a5
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: addi a3, a7, -64
+; CHECK-NEXT: sltu a4, a7, a3
+; CHECK-NEXT: addi a4, a4, -1
+; CHECK-NEXT: and a4, a4, a3
+; CHECK-NEXT: addi a3, a4, -32
+; CHECK-NEXT: sltu a5, a4, a3
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a3, a5, a3
+; CHECK-NEXT: addi a5, a3, -16
+; CHECK-NEXT: sltu a6, a3, a5
+; CHECK-NEXT: addi a6, a6, -1
+; CHECK-NEXT: and a5, a6, a5
+; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
+; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: li a6, 24
+; CHECK-NEXT: mul a5, a5, a6
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vle64.v v8, (a2)
; CHECK-NEXT: addi a5, a1, 128
+; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v26, v7, 4
; CHECK-NEXT: bltu a3, a2, .LBB16_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: .LBB16_2:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v28, v26, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v8, (a5)
-; CHECK-NEXT: addi a5, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma
-; CHECK-NEXT: li a3, 64
-; CHECK-NEXT: vmv1r.v v0, v27
+; CHECK-NEXT: vle64.v v16, (a5)
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: li a6, 48
; CHECK-NEXT: mul a5, a5, a6
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vl8r.v v8, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v28, v26, 2
+; CHECK-NEXT: li a5, 64
+; CHECK-NEXT: vmv1r.v v0, v27
+; CHECK-NEXT: vsetvli zero, a3, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
-; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: li a6, 56
-; CHECK-NEXT: mul a5, a5, a6
-; CHECK-NEXT: add a5, sp, a5
-; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vs8r.v v16, (a5) # Unknown-size Folded Spill
+; CHECK-NEXT: mul a3, a3, a6
+; CHECK-NEXT: add a3, sp, a3
+; CHECK-NEXT: addi a3, a3, 16
+; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: mv a6, a7
-; CHECK-NEXT: bltu a7, a3, .LBB16_4
+; CHECK-NEXT: bltu a7, a5, .LBB16_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a6, 64
; CHECK-NEXT: .LBB16_4:
@@ -332,10 +324,14 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: sltu t1, a6, t0
; CHECK-NEXT: addi t1, t1, -1
; CHECK-NEXT: and t0, t1, t0
-; CHECK-NEXT: vsetvli zero, t0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v28
-; CHECK-NEXT: addi t0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (t0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr t1, vlenb
+; CHECK-NEXT: li t2, 48
+; CHECK-NEXT: mul t1, t1, t2
+; CHECK-NEXT: add t1, sp, t1
+; CHECK-NEXT: addi t1, t1, 16
+; CHECK-NEXT: vl8r.v v16, (t1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, t0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT: csrr t0, vlenb
; CHECK-NEXT: slli t0, t0, 4
@@ -346,19 +342,21 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: li a6, 16
; CHECK-NEXT: .LBB16_6:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v20, v5, 2
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a5)
+; CHECK-NEXT: addi a5, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a1, 256
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v27, v25, 2
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: slli a5, a5, 3
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
-; CHECK-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
-; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
+; CHECK-NEXT: vl8r.v v8, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
+; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: li a6, 48
; CHECK-NEXT: mul a5, a5, a6
@@ -371,13 +369,20 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: li a5, 32
; CHECK-NEXT: .LBB16_8:
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT: vle64.v v24, (a1)
+; CHECK-NEXT: vle64.v v8, (a1)
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: addi a1, a5, -16
; CHECK-NEXT: sltu a5, a5, a1
; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: and a1, a5, a1
+; CHECK-NEXT: vmv1r.v v0, v27
+; CHECK-NEXT: addi a5, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a5) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v20
; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: bltu a4, a2, .LBB16_10
; CHECK-NEXT: # %bb.9:
@@ -385,8 +390,13 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: .LBB16_10:
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v6, v7, 2
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a4, e32, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v5
; CHECK-NEXT: vnsrl.wi v8, v24, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: mv a1, a7
@@ -401,13 +411,13 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: addi a4, a4, 16
; CHECK-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: li a5, 56
; CHECK-NEXT: mul a4, a4, a5
; CHECK-NEXT: add a4, sp, a4
; CHECK-NEXT: addi a4, a4, 16
; CHECK-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vslideup.vi v8, v24, 16
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: li a5, 56
@@ -446,19 +456,18 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: sltu a1, a1, a4
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a4
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 5
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 5
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT: bltu a7, a2, .LBB16_14
; CHECK-NEXT: # %bb.13:
; CHECK-NEXT: li a7, 16
; CHECK-NEXT: .LBB16_14:
-; CHECK-NEXT: vsetvli zero, a7, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: li a2, 40
@@ -466,6 +475,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a7, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vslideup.vi v16, v8, 16
@@ -509,8 +519,8 @@ define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext
; CHECK-LABEL: vtrunc_v32i32_v32i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv8r.v v24, v8
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v12, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB17_2
@@ -523,8 +533,8 @@ define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v16, 0, v0.t
; CHECK-NEXT: li a0, 32
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
index 698c48bc5565..e28d55f46abc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-uitofp-vp.ll
@@ -390,8 +390,8 @@ declare <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64>, <32 x i1>, i32)
define <32 x double> @vuitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vuitofp_v32f64_v32i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB25_2
@@ -404,8 +404,8 @@ define <32 x double> @vuitofp_v32f64_v32i64(<32 x i64> %va, <32 x i1> %m, i32 ze
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.uitofp.v32f64.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
index 36c36a13964c..805b548b0cd1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -227,7 +227,7 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV64-SLOW-NEXT: andi a0, a0, 2
; RV64-SLOW-NEXT: beqz a0, .LBB5_2
; RV64-SLOW-NEXT: .LBB5_4: # %cond.load1
-; RV64-SLOW-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; RV64-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-SLOW-NEXT: vslidedown.vi v8, v8, 1
; RV64-SLOW-NEXT: vmv.x.s a0, v8
; RV64-SLOW-NEXT: lwu a1, 4(a0)
@@ -235,7 +235,6 @@ define <2 x i64> @mgather_v2i64_align4(<2 x ptr> %ptrs, <2 x i1> %m, <2 x i64> %
; RV64-SLOW-NEXT: slli a1, a1, 32
; RV64-SLOW-NEXT: or a0, a1, a0
; RV64-SLOW-NEXT: vmv.s.x v8, a0
-; RV64-SLOW-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-SLOW-NEXT: vslideup.vi v9, v8, 1
; RV64-SLOW-NEXT: vmv1r.v v8, v9
; RV64-SLOW-NEXT: ret
@@ -612,7 +611,7 @@ define void @masked_store_v2i32_align2(<2 x i32> %val, ptr %a, <2 x i32> %m) nou
; SLOW-NEXT: andi a1, a1, 2
; SLOW-NEXT: beqz a1, .LBB9_2
; SLOW-NEXT: .LBB9_4: # %cond.store1
-; SLOW-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; SLOW-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; SLOW-NEXT: vslidedown.vi v8, v8, 1
; SLOW-NEXT: vmv.x.s a1, v8
; SLOW-NEXT: sh a1, 4(a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
index 600290a62515..ea7f6beb22a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vaaddu.ll
@@ -38,9 +38,9 @@ define <8 x i8> @vaaddu_vx_v8i8_floor(<8 x i8> %x, i8 %y) {
define <8 x i8> @vaaddu_vv_v8i8_floor_sexti16(<8 x i8> %x, <8 x i8> %y) {
; CHECK-LABEL: vaaddu_vv_v8i8_floor_sexti16:
; CHECK: # %bb.0:
+; CHECK-NEXT: csrwi vxrm, 2
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vwadd.vv v10, v8, v9
-; CHECK-NEXT: vnsrl.wi v8, v10, 1
+; CHECK-NEXT: vaadd.vv v8, v8, v9
; CHECK-NEXT: ret
%xzv = sext <8 x i8> %x to <8 x i16>
%yzv = sext <8 x i8> %y to <8 x i16>
@@ -248,12 +248,9 @@ define <8 x i8> @vaaddu_vx_v8i8_ceil(<8 x i8> %x, i8 %y) {
define <8 x i8> @vaaddu_vv_v8i8_ceil_sexti16(<8 x i8> %x, <8 x i8> %y) {
; CHECK-LABEL: vaaddu_vv_v8i8_ceil_sexti16:
; CHECK: # %bb.0:
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT: vwadd.vv v10, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vadd.vi v8, v10, 1
-; CHECK-NEXT: vsetvli zero, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vnsrl.wi v8, v8, 1
+; CHECK-NEXT: vaadd.vv v8, v8, v9
; CHECK-NEXT: ret
%xzv = sext <8 x i8> %x to <8 x i16>
%yzv = sext <8 x i8> %y to <8 x i16>
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
index 2c62cbd583d0..5601bd5ee7a3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vadd-vp.ll
@@ -377,8 +377,8 @@ define <256 x i8> @vadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %ev
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
@@ -416,8 +416,8 @@ define <256 x i8> @vadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.add.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
@@ -1348,8 +1348,8 @@ declare <32 x i64> @llvm.vp.add.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vadd_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
@@ -1365,15 +1365,15 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
@@ -1386,8 +1386,8 @@ define <32 x i64> @vadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
@@ -1468,8 +1468,8 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1479,8 +1479,8 @@ define <32 x i64> @vadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.add.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
index 507cf5cc6b80..d414be76672a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vand-vp.ll
@@ -1140,15 +1140,16 @@ define <11 x i64> @vand_vx_v11i64(<11 x i64> %va, i64 %b, <11 x i1> %m, i32 zero
; RV32-LABEL: vand_vx_v11i64:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v16, v0
-; RV32-NEXT: li a3, 32
-; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: lui a3, 341
; RV32-NEXT: addi a3, a3, 1365
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT: vmv.s.x v0, a3
+; RV32-NEXT: li a3, 32
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: vmv.v.x v24, a1
; RV32-NEXT: vmerge.vxm v24, v24, a0, v0
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v16
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
;
@@ -1167,10 +1168,11 @@ define <11 x i64> @vand_vx_v11i64_unmasked(<11 x i64> %va, i64 %b, i32 zeroext %
; RV32-LABEL: vand_vx_v11i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: li a3, 32
+; RV32-NEXT: lui a4, 341
+; RV32-NEXT: addi a4, a4, 1365
+; RV32-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; RV32-NEXT: vmv.s.x v0, a4
; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; RV32-NEXT: lui a3, 341
-; RV32-NEXT: addi a3, a3, 1365
-; RV32-NEXT: vmv.s.x v0, a3
; RV32-NEXT: vmv.v.x v16, a1
; RV32-NEXT: vmerge.vxm v16, v16, a0, v0
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
index 01b07b4081e6..77a095303675 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vcopysign-vp.ll
@@ -298,37 +298,46 @@ define <32 x double> @vfsgnj_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
-; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v16, v16, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
index f32e2bbf3794..ae3dce497c6d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfabs-vp.ll
@@ -379,8 +379,8 @@ declare <32 x double> @llvm.vp.fabs.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vfabs_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfabs_vv_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -393,8 +393,8 @@ define <32 x double> @vfabs_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.fabs.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
index 0574773fb2fd..e2e48cee3eac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfma-vp.ll
@@ -862,51 +862,51 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a2)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle64.v v24, (a0)
-; CHECK-NEXT: li a1, 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a1, 16
; CHECK-NEXT: mv a0, a4
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: bltu a4, a1, .LBB50_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a4, -16
; CHECK-NEXT: sltu a1, a4, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -954,25 +954,25 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB51_2:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v0, v8, v24
; CHECK-NEXT: addi a0, a4, -16
; CHECK-NEXT: sltu a1, a4, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v24, v16, v8
; CHECK-NEXT: vmv8r.v v8, v0
; CHECK-NEXT: vmv.v.v v16, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
index ffa88e28d7dc..c83a298cb501 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmax-vp.ll
@@ -390,37 +390,46 @@ define <32 x double> @vfmax_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
-; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v16, v16, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
index 17f851e172f8..60dbededb90a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmin-vp.ll
@@ -390,37 +390,46 @@ define <32 x double> @vfmin_vv_v32f64(<32 x double> %va, <32 x double> %vb, <32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 4
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a1)
-; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: mv a0, a2
; CHECK-NEXT: bltu a2, a1, .LBB26_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB26_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v24, v0.t
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v16, v16, v24, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
index 288efb0f1fc2..6c695b43d271 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfmuladd-vp.ll
@@ -626,51 +626,51 @@ define <32 x double> @vfma_vv_v32f64(<32 x double> %va, <32 x double> %b, <32 x
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v7, v0, 2
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a2)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: vle64.v v24, (a0)
-; CHECK-NEXT: li a1, 16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: li a1, 16
; CHECK-NEXT: mv a0, a4
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
; CHECK-NEXT: bltu a4, a1, .LBB50_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB50_2:
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v24, v8, v16, v0.t
+; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a4, -16
; CHECK-NEXT: sltu a1, a4, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a1, a1, a2
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -718,25 +718,25 @@ define <32 x double> @vfma_vv_v32f64_unmasked(<32 x double> %va, <32 x double> %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: .LBB51_2:
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v0, v8, v24
; CHECK-NEXT: addi a0, a4, -16
; CHECK-NEXT: sltu a1, a4, a0
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfmadd.vv v24, v16, v8
; CHECK-NEXT: vmv8r.v v8, v0
; CHECK-NEXT: vmv.v.v v16, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
index c36ec25c04f9..fbc4c56a9113 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfneg-vp.ll
@@ -379,8 +379,8 @@ declare <32 x double> @llvm.vp.fneg.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfneg_vv_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -393,8 +393,8 @@ define <32 x double> @vfneg_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroe
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfneg.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.fneg.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
index 6004eb4fe217..988b200ae536 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfsqrt-vp.ll
@@ -379,8 +379,8 @@ declare <32 x double> @llvm.vp.sqrt.v32f64(<32 x double>, <32 x i1>, i32)
define <32 x double> @vfsqrt_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsqrt_vv_v32f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB26_2
@@ -393,8 +393,8 @@ define <32 x double> @vfsqrt_vv_v32f64(<32 x double> %va, <32 x i1> %m, i32 zero
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a1
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsqrt.v v16, v16, v0.t
; CHECK-NEXT: ret
%v = call <32 x double> @llvm.vp.sqrt.v32f64(<32 x double> %va, <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
index dd3a50cfd773..05c7bd990642 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwadd.ll
@@ -105,13 +105,12 @@ define <64 x float> @vfwadd_v64f16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwadd.vv v8, v16, v24
+; CHECK-NEXT: vfwadd.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwadd.vv v8, v16, v0
@@ -216,13 +215,12 @@ define <32 x double> @vfwadd_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwadd.vv v8, v16, v24
+; CHECK-NEXT: vfwadd.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwadd.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
index 7eaa1856ce22..5a57801d33b4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmul.ll
@@ -105,13 +105,12 @@ define <64 x float> @vfwmul_v64f16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwmul.vv v8, v16, v24
+; CHECK-NEXT: vfwmul.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwmul.vv v8, v16, v0
@@ -216,13 +215,12 @@ define <32 x double> @vfwmul_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwmul.vv v8, v16, v24
+; CHECK-NEXT: vfwmul.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwmul.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
index 8cf7c5f17586..2c706cad9742 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwsub.ll
@@ -105,13 +105,12 @@ define <64 x float> @vfwsub_v64f16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwsub.vv v8, v16, v24
+; CHECK-NEXT: vfwsub.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwsub.vv v8, v16, v0
@@ -216,13 +215,12 @@ define <32 x double> @vfwsub_v32f32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vfwsub.vv v8, v16, v24
+; CHECK-NEXT: vfwsub.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vfwsub.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
index 3db44e87109b..9789afda9344 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmax-vp.ll
@@ -282,8 +282,8 @@ define <256 x i8> @vmax_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zero
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -325,8 +325,8 @@ define <256 x i8> @vmax_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -1021,8 +1021,8 @@ declare <32 x i64> @llvm.vp.smax.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmax_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
@@ -1038,15 +1038,15 @@ define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmax.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB74_2
@@ -1060,8 +1060,8 @@ define <32 x i64> @vmax_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmax.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.smax.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
index c97c2232715f..36b0a4642b61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmaxu-vp.ll
@@ -281,8 +281,8 @@ define <256 x i8> @vmaxu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zer
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -324,8 +324,8 @@ define <256 x i8> @vmaxu_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m)
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -1020,8 +1020,8 @@ declare <32 x i64> @llvm.vp.umax.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmaxu_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
@@ -1037,15 +1037,15 @@ define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmaxu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB74_2
@@ -1059,8 +1059,8 @@ define <32 x i64> @vmaxu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmaxu.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.umax.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
index eaa19110a2a2..adb0a30f34d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vmin-vp.ll
@@ -282,8 +282,8 @@ define <256 x i8> @vmin_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zero
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -325,8 +325,8 @@ define <256 x i8> @vmin_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -1021,8 +1021,8 @@ declare <32 x i64> @llvm.vp.smin.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vmin_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
@@ -1038,15 +1038,15 @@ define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmin.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB74_2
@@ -1060,8 +1060,8 @@ define <32 x i64> @vmin_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmin.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.smin.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
index 48175e5b905b..671ce82d4ae7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vminu-vp.ll
@@ -281,8 +281,8 @@ define <256 x i8> @vminu_vx_v258i8(<256 x i8> %va, i8 %b, <256 x i1> %m, i32 zer
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -324,8 +324,8 @@ define <256 x i8> @vminu_vx_v258i8_evl129(<256 x i8> %va, i8 %b, <256 x i1> %m)
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 %b, i32 0
@@ -1020,8 +1020,8 @@ declare <32 x i64> @llvm.vp.umin.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i32)
define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vminu_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB74_2
@@ -1037,15 +1037,15 @@ define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vminu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB74_2
@@ -1059,8 +1059,8 @@ define <32 x i64> @vminu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vminu.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.umin.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
index a13f1eed8efb..028fb9a626f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpgather.ll
@@ -297,10 +297,10 @@ define <32 x i8> @vpgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %
; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 16, e8, m2, ta, ma
; RV64-NEXT: vslidedown.vi v8, v8, 16
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: li a0, 32
@@ -1882,10 +1882,10 @@ define <32 x double> @vpgather_v32f64(<32 x ptr> %ptrs, <32 x i1> %m, i32 zeroex
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v8, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v8, 16
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: vmv8r.v v8, v24
@@ -1904,9 +1904,9 @@ define <32 x double> @vpgather_v32f64(<32 x ptr> %ptrs, <32 x i1> %m, i32 zeroex
; RV64-NEXT: addi a1, a0, -16
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
-; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a0, a0, a1
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t
; RV64-NEXT: ret
@@ -1933,10 +1933,10 @@ define <32 x double> @vpgather_baseidx_v32i8_v32f64(ptr %base, <32 x i8> %idxs,
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -1961,9 +1961,9 @@ define <32 x double> @vpgather_baseidx_v32i8_v32f64(ptr %base, <32 x i8> %idxs,
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -1991,10 +1991,10 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2020,9 +2020,9 @@ define <32 x double> @vpgather_baseidx_sext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2051,10 +2051,10 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e16, m4, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e16, m4, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei16.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2077,10 +2077,10 @@ define <32 x double> @vpgather_baseidx_zext_v32i8_v32f64(ptr %base, <32 x i8> %i
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a1, a1, a2
-; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma
-; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e16, m4, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei16.v v16, (a0), v24, v0.t
; RV64-NEXT: ret
@@ -2109,10 +2109,10 @@ define <32 x double> @vpgather_baseidx_v32i16_v32f64(ptr %base, <32 x i16> %idxs
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2137,9 +2137,9 @@ define <32 x double> @vpgather_baseidx_v32i16_v32f64(ptr %base, <32 x i16> %idxs
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2167,10 +2167,10 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(ptr %base, <32 x i16>
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2196,9 +2196,9 @@ define <32 x double> @vpgather_baseidx_sext_v32i16_v32f64(ptr %base, <32 x i16>
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2227,10 +2227,10 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(ptr %base, <32 x i16>
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2253,10 +2253,10 @@ define <32 x double> @vpgather_baseidx_zext_v32i16_v32f64(ptr %base, <32 x i16>
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
; RV64-NEXT: and a1, a1, a2
-; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV64-NEXT: vslidedown.vi v24, v16, 16
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV64-NEXT: ret
@@ -2270,8 +2270,8 @@ define <32 x double> @vpgather_baseidx_v32i32_v32f64(ptr %base, <32 x i32> %idxs
; RV32-LABEL: vpgather_baseidx_v32i32_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: li a3, 16
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
; RV32-NEXT: mv a2, a1
; RV32-NEXT: bltu a1, a3, .LBB93_2
@@ -2284,10 +2284,10 @@ define <32 x double> @vpgather_baseidx_v32i32_v32f64(ptr %base, <32 x i32> %idxs
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2312,9 +2312,9 @@ define <32 x double> @vpgather_baseidx_v32i32_v32f64(ptr %base, <32 x i32> %idxs
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2327,8 +2327,8 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(ptr %base, <32 x i32>
; RV32-LABEL: vpgather_baseidx_sext_v32i32_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: li a3, 16
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
; RV32-NEXT: mv a2, a1
; RV32-NEXT: bltu a1, a3, .LBB94_2
@@ -2341,10 +2341,10 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(ptr %base, <32 x i32>
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2370,9 +2370,9 @@ define <32 x double> @vpgather_baseidx_sext_v32i32_v32f64(ptr %base, <32 x i32>
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2386,8 +2386,8 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
; RV32-LABEL: vpgather_baseidx_zext_v32i32_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: li a2, 32
-; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: li a3, 16
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
; RV32-NEXT: mv a2, a1
; RV32-NEXT: bltu a1, a3, .LBB95_2
@@ -2400,10 +2400,10 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a1, a1, a2
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
@@ -2429,9 +2429,9 @@ define <32 x double> @vpgather_baseidx_zext_v32i32_v32f64(ptr %base, <32 x i32>
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
@@ -2457,9 +2457,9 @@ define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x
; RV32-NEXT: addi a2, a1, -16
; RV32-NEXT: sltu a3, a1, a2
; RV32-NEXT: addi a3, a3, -1
-; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: and a2, a3, a2
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: li a2, 16
@@ -2467,8 +2467,8 @@ define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB96_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
;
@@ -2488,9 +2488,9 @@ define <32 x double> @vpgather_baseidx_v32f64(ptr %base, <32 x i64> %idxs, <32 x
; RV64-NEXT: addi a2, a1, -16
; RV64-NEXT: sltu a1, a1, a2
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a1, a2
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
index 9ef89352e65e..f204d812c14f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpload.ll
@@ -377,9 +377,9 @@ define <32 x double> @vpload_v32f64(ptr %ptr, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: sltu a1, a1, a2
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a0), v0.t
; CHECK-NEXT: ret
@@ -405,9 +405,9 @@ define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: sltu a3, a3, a4
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a3, a3, a4
-; CHECK-NEXT: addi a4, a1, 128
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 2
+; CHECK-NEXT: addi a4, a1, 128
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a4), v0.t
; CHECK-NEXT: addi a3, a2, -32
@@ -419,17 +419,17 @@ define <33 x double> @vpload_v33f64(ptr %ptr, <33 x i1> %m, i32 zeroext %evl) {
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a4, 16
; CHECK-NEXT: .LBB32_4:
-; CHECK-NEXT: addi a5, a1, 256
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 4
+; CHECK-NEXT: addi a5, a1, 256
; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a5), v0.t
; CHECK-NEXT: bltu a2, a3, .LBB32_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB32_6:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a1), v0.t
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v8, (a0)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
index 466448a7a05a..9f0561b394b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpmerge.ll
@@ -1193,17 +1193,17 @@ define <32 x double> @vpmerge_vv_v32f64(<32 x double> %va, <32 x double> %vb, <3
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v16, v16, v8, v0
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: csrr a0, vlenb
@@ -1229,9 +1229,9 @@ define <32 x double> @vpmerge_vf_v32f64(double %a, <32 x double> %vb, <32 x i1>
; CHECK-NEXT: addi a1, a0, -16
; CHECK-NEXT: sltu a0, a0, a1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetvli zero, a0, e64, m8, tu, ma
; CHECK-NEXT: vfmerge.vfm v16, v16, fa0, v0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
index cd9a38d5167d..0c180cd148b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpscatter.ll
@@ -1685,10 +1685,10 @@ define void @vpscatter_v32f64(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m
; RV32-NEXT: sltu a1, a1, a0
; RV32-NEXT: addi a1, a1, -1
; RV32-NEXT: and a0, a1, a0
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: ret
@@ -1718,12 +1718,12 @@ define void @vpscatter_v32f64(<32 x double> %val, <32 x ptr> %ptrs, <32 x i1> %m
; RV64-NEXT: addi a0, a2, -16
; RV64-NEXT: sltu a1, a2, a0
; RV64-NEXT: addi a1, a1, -1
-; RV64-NEXT: and a0, a1, a0
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a0, a1, a0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
@@ -1753,10 +1753,10 @@ define void @vpscatter_baseidx_v32i32_v32f64(<32 x double> %val, ptr %base, <32
; RV32-NEXT: sltu a2, a2, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: ret
@@ -1766,51 +1766,44 @@ define void @vpscatter_baseidx_v32i32_v32f64(<32 x double> %val, ptr %base, <32
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a3, vlenb
-; RV64-NEXT: li a4, 10
-; RV64-NEXT: mul a3, a3, a4
+; RV64-NEXT: slli a3, a3, 3
; RV64-NEXT: sub sp, sp, a3
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x0a, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 10 * vlenb
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; RV64-NEXT: li a3, 32
; RV64-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV64-NEXT: vle32.v v24, (a1)
+; RV64-NEXT: vmv1r.v v7, v0
; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vs1r.v v0, (a1) # Unknown-size Folded Spill
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV64-NEXT: vslidedown.vi v0, v24, 16
+; RV64-NEXT: vslidedown.vi v16, v24, 16
+; RV64-NEXT: vmv4r.v v0, v24
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf2 v16, v0
-; RV64-NEXT: vsll.vi v16, v16, 3
-; RV64-NEXT: vsext.vf2 v0, v24
+; RV64-NEXT: vsext.vf2 v24, v16
+; RV64-NEXT: vsll.vi v16, v24, 3
+; RV64-NEXT: vsext.vf2 v24, v0
; RV64-NEXT: li a3, 16
-; RV64-NEXT: vsll.vi v24, v0, 3
+; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: mv a1, a2
; RV64-NEXT: bltu a2, a3, .LBB80_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB80_2:
+; RV64-NEXT: vmv1r.v v0, v7
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: addi a1, a2, -16
; RV64-NEXT: sltu a2, a2, a1
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a1, a2, a1
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: vslidedown.vi v0, v7, 2
+; RV64-NEXT: and a1, a2, a1
+; RV64-NEXT: addi a2, sp, 16
+; RV64-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: li a1, 10
-; RV64-NEXT: mul a0, a0, a1
+; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -1838,10 +1831,10 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV32-NEXT: sltu a2, a2, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: ret
@@ -1878,21 +1871,21 @@ define void @vpscatter_baseidx_sext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB81_2:
+; RV64-NEXT: addi a3, sp, 16
+; RV64-NEXT: vl1r.v v0, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: addi a1, a2, -16
; RV64-NEXT: sltu a2, a2, a1
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a1, a2, a1
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a2, a1
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 10
@@ -1925,10 +1918,10 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV32-NEXT: sltu a2, a2, a1
; RV32-NEXT: addi a2, a2, -1
; RV32-NEXT: and a1, a2, a1
-; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
-; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
+; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
+; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: ret
@@ -1965,21 +1958,21 @@ define void @vpscatter_baseidx_zext_v32i32_v32f64(<32 x double> %val, ptr %base,
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB82_2:
+; RV64-NEXT: addi a3, sp, 16
+; RV64-NEXT: vl1r.v v0, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: addi a1, sp, 16
-; RV64-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: addi a1, a2, -16
; RV64-NEXT: sltu a2, a2, a1
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a1, a2, a1
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v0, 2
+; RV64-NEXT: and a1, a2, a1
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
-; RV64-NEXT: csrr a1, vlenb
-; RV64-NEXT: add a1, sp, a1
-; RV64-NEXT: addi a1, a1, 16
-; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 10
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
index c0aa735614b2..f396790f4f17 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vpstore.ll
@@ -295,9 +295,9 @@ define void @vpstore_v32f64(<32 x double> %val, ptr %ptr, <32 x i1> %m, i32 zero
; CHECK-NEXT: sltu a1, a1, a2
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a2
-; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: addi a0, a0, 128
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v16, (a0), v0.t
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
index 291629de6dcf..df2c83028e5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsadd-vp.ll
@@ -386,8 +386,8 @@ define <256 x i8> @vsadd_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
@@ -425,8 +425,8 @@ define <256 x i8> @vsadd_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
@@ -442,8 +442,8 @@ define <256 x i8> @vsadd_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.sadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
@@ -1361,8 +1361,8 @@ declare <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i
define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsadd_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
@@ -1378,15 +1378,15 @@ define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsadd_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
@@ -1399,8 +1399,8 @@ define <32 x i64> @vsadd_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
@@ -1462,8 +1462,8 @@ define <32 x i64> @vsadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1473,8 +1473,8 @@ define <32 x i64> @vsadd_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV64-NEXT: vsadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vsadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
@@ -1491,8 +1491,8 @@ define <32 x i64> @vsadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vsadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1502,8 +1502,8 @@ define <32 x i64> @vsadd_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vsadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vsadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.sadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
index d38ee1148e89..f50dadf01991 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vsaddu-vp.ll
@@ -382,8 +382,8 @@ define <256 x i8> @vsaddu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
@@ -421,8 +421,8 @@ define <256 x i8> @vsaddu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
@@ -438,8 +438,8 @@ define <256 x i8> @vsaddu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.uadd.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
@@ -1357,8 +1357,8 @@ declare <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i
define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsaddu_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
@@ -1374,15 +1374,15 @@ define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsaddu_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
@@ -1395,8 +1395,8 @@ define <32 x i64> @vsaddu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsaddu.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
@@ -1458,8 +1458,8 @@ define <32 x i64> @vsaddu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1469,8 +1469,8 @@ define <32 x i64> @vsaddu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV64-NEXT: vsaddu.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vsaddu.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
@@ -1487,8 +1487,8 @@ define <32 x i64> @vsaddu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vsaddu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1498,8 +1498,8 @@ define <32 x i64> @vsaddu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vsaddu.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vsaddu.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.uadd.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
index 12d96fbfb88d..4f533f2055bf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vscale-range.ll
@@ -24,17 +24,17 @@ define <512 x i8> @vadd_v512i8_zvl128(<512 x i8> %a, <512 x i8> %b) #0 {
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: li a2, 128
+; CHECK-NEXT: addi a4, a3, 128
+; CHECK-NEXT: addi a5, a3, 384
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT: addi a2, a3, 128
-; CHECK-NEXT: addi a4, a3, 384
-; CHECK-NEXT: vle8.v v8, (a4)
-; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: vle8.v v8, (a5)
+; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: li a5, 24
-; CHECK-NEXT: mul a4, a4, a5
-; CHECK-NEXT: add a4, sp, a4
-; CHECK-NEXT: addi a4, a4, 16
-; CHECK-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
-; CHECK-NEXT: addi a4, a1, 128
+; CHECK-NEXT: mul a2, a2, a5
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a2, a1, 128
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
@@ -48,10 +48,10 @@ define <512 x i8> @vadd_v512i8_zvl128(<512 x i8> %a, <512 x i8> %b) #0 {
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vle8.v v8, (a4)
+; CHECK-NEXT: vle8.v v8, (a2)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vle8.v v24, (a2)
+; CHECK-NEXT: vle8.v v24, (a4)
; CHECK-NEXT: vle8.v v0, (a3)
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
index d05f580ea7d2..0a2ed3eb1ffb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vselect-vp.ll
@@ -175,19 +175,18 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
; CHECK-NEXT: vle8.v v16, (a0)
; CHECK-NEXT: addi a0, a3, -128
; CHECK-NEXT: sltu a4, a3, a0
-; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: vle8.v v0, (a1)
; CHECK-NEXT: addi a1, sp, 16
; CHECK-NEXT: vs8r.v v0, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a0, a4, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v24, v16, v24, v0
; CHECK-NEXT: bltu a3, a2, .LBB11_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a3, 128
; CHECK-NEXT: .LBB11_2:
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -196,6 +195,7 @@ define <256 x i8> @select_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c, i3
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
; CHECK-NEXT: vmv8r.v v16, v24
; CHECK-NEXT: csrr a0, vlenb
@@ -221,39 +221,39 @@ define <256 x i8> @select_evl_v256i8(<256 x i1> %a, <256 x i8> %b, <256 x i8> %c
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v24, (a0)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, a1, 128
; CHECK-NEXT: vle8.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vle8.v v24, (a1)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vle8.v v16, (a1)
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmerge.vvm v24, v24, v16, v0
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v24, v8, v24, v0
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmerge.vvm v8, v16, v8, v0
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v8, v16, v0
; CHECK-NEXT: vmv8r.v v16, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
@@ -437,12 +437,12 @@ define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32
; CHECK-NEXT: addi a0, a2, -16
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -456,15 +456,41 @@ define <32 x i64> @select_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32
define <32 x i64> @select_evl_v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c) {
; CHECK-LABEL: select_evl_v32i64:
; CHECK: # %bb.0:
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: addi a1, a0, 128
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a0)
-; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
-; CHECK-NEXT: addi a0, a0, 128
-; CHECK-NEXT: vle64.v v24, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vle64.v v24, (a1)
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v0, v0, 2
+; CHECK-NEXT: vslidedown.vi v7, v0, 2
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
+; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsetivli zero, 1, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <32 x i64> @llvm.vp.select.v32i64(<32 x i1> %a, <32 x i64> %b, <32 x i64> %c, i32 17)
ret <32 x i64> %v
@@ -594,12 +620,12 @@ define <64 x float> @select_v64f32(<64 x i1> %a, <64 x float> %b, <64 x float> %
; CHECK-NEXT: addi a0, a2, -32
; CHECK-NEXT: sltu a1, a2, a0
; CHECK-NEXT: addi a1, a1, -1
-; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 4
+; CHECK-NEXT: and a0, a1, a0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmerge.vvm v16, v24, v16, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
index 2caa2ff41a7d..b82ca70477ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssub-vp.ll
@@ -399,8 +399,8 @@ define <256 x i8> @vssub_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %e
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
@@ -440,8 +440,8 @@ define <256 x i8> @vssub_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
@@ -458,8 +458,8 @@ define <256 x i8> @vssub_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.ssub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
@@ -1401,8 +1401,8 @@ declare <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i
define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vssub_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
@@ -1418,15 +1418,15 @@ define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vssub_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
@@ -1440,8 +1440,8 @@ define <32 x i64> @vssub_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %ev
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vssub.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
@@ -1504,8 +1504,8 @@ define <32 x i64> @vssub_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1516,8 +1516,8 @@ define <32 x i64> @vssub_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: li a0, -1
; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vssub.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
@@ -1534,8 +1534,8 @@ define <32 x i64> @vssub_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vssub.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1546,8 +1546,8 @@ define <32 x i64> @vssub_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: li a0, -1
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vssub.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vssub.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.ssub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
index 6313f31bc1a6..6d8ed563f02b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vssubu-vp.ll
@@ -394,8 +394,8 @@ define <256 x i8> @vssubu_vi_v258i8(<256 x i8> %va, <256 x i1> %m, i32 zeroext %
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 %evl)
@@ -435,8 +435,8 @@ define <256 x i8> @vssubu_vi_v258i8_evl129(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 129)
@@ -453,8 +453,8 @@ define <256 x i8> @vssubu_vi_v258i8_evl128(<256 x i8> %va, <256 x i1> %m) {
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: li a0, -1
; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
%v = call <256 x i8> @llvm.vp.usub.sat.v258i8(<256 x i8> %va, <256 x i8> splat (i8 -1), <256 x i1> %m, i32 128)
@@ -1396,8 +1396,8 @@ declare <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64>, <32 x i64>, <32 x i1>, i
define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vssubu_vx_v32i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: li a2, 16
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v7, v0, 2
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB108_2
@@ -1413,15 +1413,15 @@ define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV32-NEXT: sltu a0, a0, a1
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a1
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vssubu_vx_v32i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: li a2, 16
+; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB108_2
@@ -1435,8 +1435,8 @@ define <32 x i64> @vssubu_vx_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %e
; RV64-NEXT: sltu a0, a0, a1
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a1
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vssubu.vx v16, v16, a2, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 %evl)
@@ -1499,8 +1499,8 @@ define <32 x i64> @vssubu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1511,8 +1511,8 @@ define <32 x i64> @vssubu_vx_v32i64_evl12(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: li a0, -1
; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e64, m8, ta, ma
; RV64-NEXT: vssubu.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 12)
@@ -1529,8 +1529,8 @@ define <32 x i64> @vssubu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV32-NEXT: vmv.v.i v24, -1
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v7
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vssubu.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
;
@@ -1541,8 +1541,8 @@ define <32 x i64> @vssubu_vx_v32i64_evl27(<32 x i64> %va, <32 x i1> %m) {
; RV64-NEXT: li a0, -1
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vssubu.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vssubu.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%v = call <32 x i64> @llvm.vp.usub.sat.v32i64(<32 x i64> %va, <32 x i64> splat (i64 -1), <32 x i1> %m, i32 27)
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
index b1726be941e3..d6ca6c5a4b83 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwadd.ll
@@ -263,13 +263,12 @@ define <128 x i16> @vwadd_v128i16(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwadd.vv v8, v16, v24
+; CHECK-NEXT: vwadd.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwadd.vv v8, v16, v0
@@ -309,13 +308,12 @@ define <64 x i32> @vwadd_v64i32(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwadd.vv v8, v16, v24
+; CHECK-NEXT: vwadd.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwadd.vv v8, v16, v0
@@ -354,13 +352,12 @@ define <32 x i64> @vwadd_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwadd.vv v8, v16, v24
+; CHECK-NEXT: vwadd.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwadd.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
index f6d9695c5149..61378a424ecb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwaddu.ll
@@ -263,13 +263,12 @@ define <128 x i16> @vwaddu_v128i16(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v16, v24
+; CHECK-NEXT: vwaddu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwaddu.vv v8, v16, v0
@@ -309,13 +308,12 @@ define <64 x i32> @vwaddu_v64i32(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v16, v24
+; CHECK-NEXT: vwaddu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwaddu.vv v8, v16, v0
@@ -354,13 +352,12 @@ define <32 x i64> @vwaddu_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwaddu.vv v8, v16, v24
+; CHECK-NEXT: vwaddu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwaddu.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
index c87584ab6351..93927e10e607 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmul.ll
@@ -289,13 +289,12 @@ define <128 x i16> @vwmul_v128i16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmul.vv v8, v16, v24
+; CHECK-NEXT: vwmul.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmul.vv v8, v16, v0
@@ -337,13 +336,12 @@ define <64 x i32> @vwmul_v64i32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmul.vv v8, v16, v24
+; CHECK-NEXT: vwmul.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmul.vv v8, v16, v0
@@ -384,13 +382,12 @@ define <32 x i64> @vwmul_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmul.vv v8, v16, v24
+; CHECK-NEXT: vwmul.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmul.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
index a56984577ea7..ee114350a432 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulsu.ll
@@ -281,13 +281,12 @@ define <128 x i16> @vwmulsu_v128i16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulsu.vv v8, v24, v16
+; CHECK-NEXT: vwmulsu.vv v24, v8, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulsu.vv v8, v0, v16
@@ -329,13 +328,12 @@ define <64 x i32> @vwmulsu_v64i32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulsu.vv v8, v24, v16
+; CHECK-NEXT: vwmulsu.vv v24, v8, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulsu.vv v8, v0, v16
@@ -376,13 +374,12 @@ define <32 x i64> @vwmulsu_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulsu.vv v8, v24, v16
+; CHECK-NEXT: vwmulsu.vv v24, v8, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulsu.vv v8, v0, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
index b97c9654ad3c..17a76ae5e7f7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwmulu.ll
@@ -265,13 +265,12 @@ define <128 x i16> @vwmulu_v128i16(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulu.vv v8, v16, v24
+; CHECK-NEXT: vwmulu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulu.vv v8, v16, v0
@@ -313,13 +312,12 @@ define <64 x i32> @vwmulu_v64i32(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulu.vv v8, v16, v24
+; CHECK-NEXT: vwmulu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulu.vv v8, v16, v0
@@ -360,13 +358,12 @@ define <32 x i64> @vwmulu_v32i64(ptr %x, ptr %y) {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwmulu.vv v8, v16, v24
+; CHECK-NEXT: vwmulu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwmulu.vv v8, v16, v0
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
index 2782a5fbb1ea..a2675d59ade9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsub.ll
@@ -263,13 +263,12 @@ define <128 x i16> @vwsub_v128i16(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsub.vv v8, v16, v24
+; CHECK-NEXT: vwsub.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsub.vv v8, v16, v0
@@ -309,13 +308,12 @@ define <64 x i32> @vwsub_v64i32(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsub.vv v8, v16, v24
+; CHECK-NEXT: vwsub.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsub.vv v8, v16, v0
@@ -354,13 +352,12 @@ define <32 x i64> @vwsub_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsub.vv v8, v16, v24
+; CHECK-NEXT: vwsub.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsub.vv v8, v16, v0
@@ -715,8 +712,8 @@ define <8 x i16> @vwsub_vx_v8i16_i16(ptr %x, ptr %y) {
define <4 x i32> @vwsub_vx_v4i32_i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsub_vx_v4i32_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: lb a1, 0(a1)
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vmv.v.x v10, a1
; CHECK-NEXT: vwsub.vv v8, v10, v9
@@ -779,8 +776,8 @@ define <2 x i64> @vwsub_vx_v2i64_i8(ptr %x, ptr %y) nounwind {
;
; RV64-LABEL: vwsub_vx_v2i64_i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: lb a1, 0(a1)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vle32.v v9, (a0)
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vwsub.vv v8, v10, v9
@@ -808,8 +805,8 @@ define <2 x i64> @vwsub_vx_v2i64_i16(ptr %x, ptr %y) nounwind {
;
; RV64-LABEL: vwsub_vx_v2i64_i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: lh a1, 0(a1)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vle32.v v9, (a0)
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vwsub.vv v8, v10, v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
index ccbc26c84d80..1a9e3aac0034 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vwsubu.ll
@@ -263,13 +263,12 @@ define <128 x i16> @vwsubu_v128i16(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsubu.vv v8, v16, v24
+; CHECK-NEXT: vwsubu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsubu.vv v8, v16, v0
@@ -309,13 +308,12 @@ define <64 x i32> @vwsubu_v64i32(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vx v16, v8, a0
; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsubu.vv v8, v16, v24
+; CHECK-NEXT: vwsubu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsubu.vv v8, v16, v0
@@ -354,13 +352,12 @@ define <32 x i64> @vwsubu_v32i64(ptr %x, ptr %y) nounwind {
; CHECK-NEXT: vslidedown.vi v16, v8, 16
; CHECK-NEXT: vslidedown.vi v8, v0, 16
; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT: vmv4r.v v24, v8
-; CHECK-NEXT: vwsubu.vv v8, v16, v24
+; CHECK-NEXT: vwsubu.vv v24, v16, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vwsubu.vv v8, v16, v0
@@ -715,8 +712,8 @@ define <8 x i16> @vwsubu_vx_v8i16_i16(ptr %x, ptr %y) {
define <4 x i32> @vwsubu_vx_v4i32_i8(ptr %x, ptr %y) {
; CHECK-LABEL: vwsubu_vx_v4i32_i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: lbu a1, 0(a1)
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v9, (a0)
; CHECK-NEXT: vmv.v.x v10, a1
; CHECK-NEXT: vwsubu.vv v8, v10, v9
@@ -783,8 +780,8 @@ define <2 x i64> @vwsubu_vx_v2i64_i8(ptr %x, ptr %y) nounwind {
;
; RV64-LABEL: vwsubu_vx_v2i64_i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: lbu a1, 0(a1)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vle32.v v9, (a0)
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vwsubu.vv v8, v10, v9
@@ -816,8 +813,8 @@ define <2 x i64> @vwsubu_vx_v2i64_i16(ptr %x, ptr %y) nounwind {
;
; RV64-LABEL: vwsubu_vx_v2i64_i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: lhu a1, 0(a1)
+; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vle32.v v9, (a0)
; RV64-NEXT: vmv.v.x v10, a1
; RV64-NEXT: vwsubu.vv v8, v10, v9
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
index f4d679cd57ca..df90dae379c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-zext-vp.ll
@@ -151,8 +151,8 @@ declare <32 x i64> @llvm.vp.zext.v32i64.v32i32(<32 x i32>, <32 x i1>, i32)
define <32 x i64> @vzext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vzext_v32i64_v32i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: li a2, 16
+; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v0, 2
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: bltu a0, a2, .LBB12_2
@@ -167,8 +167,8 @@ define <32 x i64> @vzext_v32i64_v32i32(<32 x i32> %va, <32 x i1> %m, i32 zeroext
; CHECK-NEXT: and a0, a0, a1
; CHECK-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 16
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vzext.vf2 v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
index d464b491bbbe..26a3e053bf7a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/floor-vp.ll
@@ -135,16 +135,16 @@ declare <vscale x 8 x half> @llvm.vp.floor.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vp_floor_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI6_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -179,16 +179,16 @@ declare <vscale x 16 x half> @llvm.vp.floor.nxv16f16(<vscale x 16 x half>, <vsca
define <vscale x 16 x half> @vp_floor_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI8_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -223,16 +223,16 @@ declare <vscale x 32 x half> @llvm.vp.floor.nxv32f16(<vscale x 32 x half>, <vsca
define <vscale x 32 x half> @vp_floor_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI10_0)
; CHECK-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -359,8 +359,8 @@ define <vscale x 4 x float> @vp_floor_nxv4f32(<vscale x 4 x float> %va, <vscale
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -403,8 +403,8 @@ define <vscale x 8 x float> @vp_floor_nxv8f32(<vscale x 8 x float> %va, <vscale
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -447,8 +447,8 @@ define <vscale x 16 x float> @vp_floor_nxv16f32(<vscale x 16 x float> %va, <vsca
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -525,16 +525,16 @@ declare <vscale x 2 x double> @llvm.vp.floor.nxv2f64(<vscale x 2 x double>, <vsc
define <vscale x 2 x double> @vp_floor_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -569,16 +569,16 @@ declare <vscale x 4 x double> @llvm.vp.floor.nxv4f64(<vscale x 4 x double>, <vsc
define <vscale x 4 x double> @vp_floor_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -613,16 +613,16 @@ declare <vscale x 7 x double> @llvm.vp.floor.nxv7f64(<vscale x 7 x double>, <vsc
define <vscale x 7 x double> @vp_floor_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -657,16 +657,16 @@ declare <vscale x 8 x double> @llvm.vp.floor.nxv8f64(<vscale x 8 x double>, <vsc
define <vscale x 8 x double> @vp_floor_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_floor_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -705,66 +705,56 @@ define <vscale x 16 x double> @vp_floor_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 2
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 2
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
index 386f23f68c35..05896d8ef6ff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-sdnode.ll
@@ -214,10 +214,7 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
@@ -229,38 +226,31 @@ define <vscale x 32 x half> @vfmax_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v8, v0
-; ZVFHMIN-NEXT: vfmax.vv v24, v8, v16
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfmax.vv v8, v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
; ZVFHMIN-NEXT: vmfeq.vv v7, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v8, v0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v16, v0
+; ZVFHMIN-NEXT: vmerge.vvm v8, v8, v16, v0
+; ZVFHMIN-NEXT: vfmax.vv v16, v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
index 02cfd3de6b4d..ab07fff59b21 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fmaximum-vp.ll
@@ -177,8 +177,8 @@ define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
@@ -253,8 +253,8 @@ define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
@@ -335,8 +335,8 @@ define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -444,13 +444,8 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x22, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 34 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 25
-; ZVFHMIN-NEXT: mul a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v24, v0
+; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -458,35 +453,36 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: li a5, 24
-; ZVFHMIN-NEXT: mul a4, a4, a5
+; ZVFHMIN-NEXT: slli a4, a4, 5
; ZVFHMIN-NEXT: add a4, sp, a4
; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
+; ZVFHMIN-NEXT: vs1r.v v24, (a4) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v8, v24, a2
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: li a4, 25
+; ZVFHMIN-NEXT: li a4, 24
; ZVFHMIN-NEXT: mul a2, a2, a4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
@@ -523,45 +519,34 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 24
-; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 4
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 25
+; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vmerge.vvm v24, v24, v16, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
@@ -573,18 +558,18 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v9
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmax.vv v16, v16, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 34
@@ -627,19 +612,18 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v7, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv4r.v v8, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: li a4, 24
@@ -647,11 +631,12 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
@@ -683,13 +668,13 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v3, v16, v16
@@ -1027,13 +1012,13 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a3, a3, 16
; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: li a5, 18
; CHECK-NEXT: mul a4, a4, a5
; CHECK-NEXT: add a4, sp, a4
; CHECK-NEXT: addi a4, a4, 16
; CHECK-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v7, v0, a3
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: add a3, sp, a3
@@ -1043,13 +1028,13 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: sltu a4, a2, a3
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 1
-; CHECK-NEXT: add a3, sp, a3
-; CHECK-NEXT: addi a3, a3, 16
-; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 1
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v26, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a3, vlenb
@@ -1112,7 +1097,6 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB28_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 18
; CHECK-NEXT: mul a0, a0, a1
@@ -1120,6 +1104,7 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl1r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
@@ -1221,12 +1206,12 @@ define <vscale x 16 x double> @vfmax_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB29_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
index 48baa12aa2e5..e94259392498 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-sdnode.ll
@@ -214,10 +214,7 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
@@ -229,38 +226,31 @@ define <vscale x 32 x half> @vfmin_nxv32f16_vv(<vscale x 32 x half> %a, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v8, v0
-; ZVFHMIN-NEXT: vfmin.vv v24, v8, v16
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfmin.vv v8, v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v16, v16
; ZVFHMIN-NEXT: vmfeq.vv v7, v8, v8
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v8, v0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vmerge.vvm v16, v8, v16, v0
+; ZVFHMIN-NEXT: vmerge.vvm v8, v8, v16, v0
+; ZVFHMIN-NEXT: vfmin.vv v16, v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmin.vv v16, v16, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
index 72a47ca2a605..fc5b11284dab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fminimum-vp.ll
@@ -177,8 +177,8 @@ define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: vmfeq.vv v8, v12, v12, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v14, v9
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v16, v12, v14, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v10
; ZVFHMIN-NEXT: vmfeq.vv v8, v14, v14, v0.t
@@ -253,8 +253,8 @@ define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v20, v10
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v24, v16, v20, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: vmfeq.vv v8, v20, v20, v0.t
@@ -335,8 +335,8 @@ define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -444,13 +444,8 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x22, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 34 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 25
-; ZVFHMIN-NEXT: mul a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v24, v0
+; ZVFHMIN-NEXT: vmv8r.v v0, v8
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -458,35 +453,36 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: li a5, 24
-; ZVFHMIN-NEXT: mul a4, a4, a5
+; ZVFHMIN-NEXT: slli a4, a4, 5
; ZVFHMIN-NEXT: add a4, sp, a4
; ZVFHMIN-NEXT: addi a4, a4, 16
-; ZVFHMIN-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
+; ZVFHMIN-NEXT: vs1r.v v24, (a4) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v8, v24, a2
; ZVFHMIN-NEXT: addi a2, sp, 16
-; ZVFHMIN-NEXT: vs1r.v v0, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vs1r.v v8, (a2) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vs8r.v v0, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vmv8r.v v0, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
-; ZVFHMIN-NEXT: li a4, 25
+; ZVFHMIN-NEXT: li a4, 24
; ZVFHMIN-NEXT: mul a2, a2, a4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v24, v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
@@ -523,45 +519,34 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 24
-; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl1r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v24, v0.t
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 4
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: addi a1, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 25
+; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vmerge.vvm v24, v24, v16, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
@@ -573,18 +558,18 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vmerge.vvm v16, v16, v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v9
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 25
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfmin.vv v16, v16, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 34
@@ -627,19 +612,18 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v7, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 4
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v12, v24, v24, v0.t
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vmv4r.v v8, v16
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: li a4, 24
@@ -647,11 +631,12 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: add a2, sp, a2
; ZVFHMIN-NEXT: addi a2, a2, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmerge.vvm v8, v16, v24, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a2, a2, 3
@@ -683,13 +668,13 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v0, v8, v8
-; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: csrr a1, vlenb
; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmfeq.vv v3, v16, v16
@@ -1027,13 +1012,13 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a3, a3, 16
; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: srli a3, a1, 3
-; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: li a5, 18
; CHECK-NEXT: mul a4, a4, a5
; CHECK-NEXT: add a4, sp, a4
; CHECK-NEXT: addi a4, a4, 16
; CHECK-NEXT: vs1r.v v0, (a4) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v7, v0, a3
; CHECK-NEXT: csrr a3, vlenb
; CHECK-NEXT: add a3, sp, a3
@@ -1043,13 +1028,13 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: sltu a4, a2, a3
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a3, a3, 1
-; CHECK-NEXT: add a3, sp, a3
-; CHECK-NEXT: addi a3, a3, 16
-; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a4, vlenb
+; CHECK-NEXT: slli a4, a4, 1
+; CHECK-NEXT: add a4, sp, a4
+; CHECK-NEXT: addi a4, a4, 16
+; CHECK-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v26, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v26
; CHECK-NEXT: csrr a3, vlenb
@@ -1112,7 +1097,6 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB28_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 18
; CHECK-NEXT: mul a0, a0, a1
@@ -1120,6 +1104,7 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl1r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v25, v16, v16, v0.t
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: csrr a0, vlenb
@@ -1221,12 +1206,12 @@ define <vscale x 16 x double> @vfmin_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB29_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v0, v16, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
index bb28ff5c6dc4..aa845bd8bb0b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fpclamptosat_vec.ll
@@ -448,8 +448,8 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -467,8 +467,8 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -609,8 +609,8 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -628,8 +628,8 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -780,8 +780,8 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -799,8 +799,8 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -1397,8 +1397,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1422,8 +1422,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1440,11 +1440,11 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1454,8 +1454,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -1473,8 +1473,8 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -1485,18 +1485,18 @@ define <8 x i16> @stest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v10, v8, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclip.wi v8, v10, 0
@@ -1710,8 +1710,8 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1735,8 +1735,8 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1753,11 +1753,11 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -1767,8 +1767,8 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -1786,8 +1786,8 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -1798,18 +1798,18 @@ define <8 x i16> @utesth_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v10, v8, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclipu.wi v8, v10, 0
@@ -2045,8 +2045,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -2070,8 +2070,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -2088,11 +2088,11 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -2102,8 +2102,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -2121,8 +2121,8 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -2133,18 +2133,18 @@ define <8 x i16> @ustest_f16i16(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v10, 4
; CHECK-V-NEXT: lui a0, 16
; CHECK-V-NEXT: addi a0, a0, -1
@@ -2279,9 +2279,9 @@ define <2 x i64> @stest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: li a2, -1
@@ -2412,9 +2412,9 @@ define <2 x i64> @utest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: call __fixunsdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixunsdfti
; CHECK-V-NEXT: snez a1, a1
@@ -2524,9 +2524,9 @@ define <2 x i64> @ustest_f64i64(<2 x double> %x) {
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv a2, s1
@@ -2686,9 +2686,9 @@ define <2 x i64> @stest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: li a2, -1
@@ -2819,9 +2819,9 @@ define <2 x i64> @utest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: snez a1, a1
@@ -2931,9 +2931,9 @@ define <2 x i64> @ustest_f32i64(<2 x float> %x) {
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv a2, s1
@@ -3819,8 +3819,8 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -3838,8 +3838,8 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -3978,8 +3978,8 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -3997,8 +3997,8 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -4148,8 +4148,8 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -4167,8 +4167,8 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -4753,8 +4753,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -4778,8 +4778,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -4796,11 +4796,11 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -4810,8 +4810,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -4829,8 +4829,8 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -4841,18 +4841,18 @@ define <8 x i16> @stest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v10, v8, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclip.wi v8, v10, 0
@@ -5064,8 +5064,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5089,8 +5089,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5107,11 +5107,11 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5121,8 +5121,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -5140,8 +5140,8 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.lu.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -5152,18 +5152,18 @@ define <8 x i16> @utesth_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v10, v8, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v10, v8, 4
; CHECK-V-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-V-NEXT: vnclipu.wi v8, v10, 0
@@ -5398,8 +5398,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, a0
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s6
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5423,8 +5423,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s5
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s4
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5441,11 +5441,11 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
@@ -5455,8 +5455,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s3
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s2
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -5474,8 +5474,8 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: fmv.w.x fa0, s1
; CHECK-V-NEXT: call __extendhfsf2
; CHECK-V-NEXT: fcvt.l.s a0, fa0, rtz
-; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: fmv.w.x fa0, s0
+; CHECK-V-NEXT: vsetivli zero, 1, e32, m1, ta, ma
; CHECK-V-NEXT: vmv.s.x v8, a0
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
@@ -5486,18 +5486,18 @@ define <8 x i16> @ustest_f16i16_mm(<8 x half> %x) {
; CHECK-V-NEXT: addi a0, sp, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; CHECK-V-NEXT: vslideup.vi v8, v9, 1
-; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v9, 2
-; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: csrr a0, vlenb
; CHECK-V-NEXT: slli a0, a0, 1
; CHECK-V-NEXT: add a0, sp, a0
; CHECK-V-NEXT: addi a0, a0, 16
; CHECK-V-NEXT: vl2r.v v10, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-V-NEXT: vslideup.vi v8, v10, 4
; CHECK-V-NEXT: lui a0, 16
; CHECK-V-NEXT: addi a0, a0, -1
@@ -5633,9 +5633,9 @@ define <2 x i64> @stest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: li a2, -1
@@ -5766,9 +5766,9 @@ define <2 x i64> @utest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: call __fixunsdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vslidedown.vi v8, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixunsdfti
@@ -5867,9 +5867,9 @@ define <2 x i64> @ustest_f64i64_mm(<2 x double> %x) {
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixdfti
; CHECK-V-NEXT: mv a2, a1
@@ -6019,9 +6019,9 @@ define <2 x i64> @stest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: li a2, -1
@@ -6152,9 +6152,9 @@ define <2 x i64> @utest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: call __fixunssfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vslidedown.vi v8, v8, 1
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixunssfti
@@ -6253,9 +6253,9 @@ define <2 x i64> @ustest_f32i64_mm(<2 x float> %x) {
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv s0, a0
; CHECK-V-NEXT: mv s1, a1
-; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: addi a0, sp, 32
; CHECK-V-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-V-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-V-NEXT: vfmv.f.s fa0, v8
; CHECK-V-NEXT: call __fixsfti
; CHECK-V-NEXT: mv a2, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
index 8f36aad81727..c45af61ced94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fptosi-sat.ll
@@ -163,12 +163,11 @@ define <vscale x 4 x i16> @test_signed_v4f64_v4i16(<vscale x 4 x double> %f) {
; CHECK-NEXT: vfmin.vf v12, v12, fa4
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v12
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vnsrl.wi v12, v16, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmfne.vv v0, v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v12, 0, v0
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%x = call <vscale x 4 x i16> @llvm.fptosi.sat.nxv4f64.nxv4i16(<vscale x 4 x double> %f)
ret <vscale x 4 x i16> %x
@@ -186,12 +185,11 @@ define <vscale x 8 x i16> @test_signed_v8f64_v8i16(<vscale x 8 x double> %f) {
; CHECK-NEXT: vfmin.vf v16, v16, fa4
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.rtz.x.f.w v24, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vnsrl.wi v16, v24, 0
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmfne.vv v0, v8, v8
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v8, v16, 0, v0
+; CHECK-NEXT: vnsrl.wi v8, v24, 0
+; CHECK-NEXT: vmerge.vim v8, v8, 0, v0
; CHECK-NEXT: ret
%x = call <vscale x 8 x i16> @llvm.fptosi.sat.nxv8f64.nxv8i16(<vscale x 8 x double> %f)
ret <vscale x 8 x i16> %x
diff --git a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
index 249f765971b0..bc5617957d7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fshr-fshl-vp.ll
@@ -960,141 +960,158 @@ define <vscale x 16 x i64> @fshr_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 40
+; CHECK-NEXT: li a3, 48
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 24
-; CHECK-NEXT: mul a1, a1, a3
+; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a3, vlenb
-; CHECK-NEXT: slli a1, a3, 3
-; CHECK-NEXT: add a5, a0, a1
-; CHECK-NEXT: srli a6, a3, 3
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
-; CHECK-NEXT: add a1, a2, a1
-; CHECK-NEXT: vl8re64.v v16, (a1)
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: sub a1, a4, a3
-; CHECK-NEXT: sltu a7, a4, a1
-; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a7, a7, a1
-; CHECK-NEXT: li a1, 63
+; CHECK-NEXT: slli a3, a1, 3
+; CHECK-NEXT: add a5, a0, a3
+; CHECK-NEXT: add a3, a2, a3
+; CHECK-NEXT: vl8re64.v v16, (a3)
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: slli a3, a3, 4
+; CHECK-NEXT: add a3, sp, a3
+; CHECK-NEXT: addi a3, a3, 16
+; CHECK-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; CHECK-NEXT: sub a3, a4, a1
+; CHECK-NEXT: sltu a6, a4, a3
+; CHECK-NEXT: addi a6, a6, -1
+; CHECK-NEXT: and a6, a6, a3
+; CHECK-NEXT: srli a3, a1, 3
; CHECK-NEXT: vl8re64.v v8, (a5)
; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 5
+; CHECK-NEXT: li a7, 40
+; CHECK-NEXT: mul a5, a5, a7
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v0, a6
-; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
-; CHECK-NEXT: vand.vx v8, v16, a1, v0.t
+; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a3
+; CHECK-NEXT: li a3, 63
+; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
+; CHECK-NEXT: vand.vx v8, v16, a3, v0.t
; CHECK-NEXT: csrr a5, vlenb
-; CHECK-NEXT: slli a5, a5, 5
+; CHECK-NEXT: li a6, 40
+; CHECK-NEXT: mul a5, a5, a6
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
; CHECK-NEXT: vl8r.v v16, (a5) # Unknown-size Folded Reload
; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
-; CHECK-NEXT: addi a5, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: slli a5, a5, 3
; CHECK-NEXT: add a5, sp, a5
; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a5, a5, 4
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
; CHECK-NEXT: vl8r.v v8, (a5) # Unknown-size Folded Reload
; CHECK-NEXT: vnot.v v8, v8, v0.t
-; CHECK-NEXT: vand.vx v16, v8, a1, v0.t
+; CHECK-NEXT: vand.vx v8, v8, a3, v0.t
+; CHECK-NEXT: addi a5, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a5) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re64.v v8, (a0)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re64.v v8, (a2)
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: li a2, 40
+; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
+; CHECK-NEXT: vsll.vi v16, v8, 1, v0.t
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsll.vv v16, v16, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vor.vv v8, v16, v8, v0.t
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: bltu a4, a3, .LBB46_2
+; CHECK-NEXT: bltu a4, a1, .LBB46_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a4, a3
+; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB46_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: li a1, 40
+; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
+; CHECK-NEXT: vand.vx v8, v16, a3, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsrl.vv v8, v16, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 5
+; CHECK-NEXT: li a1, 40
+; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vnot.v v16, v8, v0.t
-; CHECK-NEXT: vand.vx v16, v16, a1, v0.t
+; CHECK-NEXT: vand.vx v16, v16, a3, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsll.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 40
+; CHECK-NEXT: li a1, 48
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
@@ -1150,12 +1167,12 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
; CHECK-NEXT: vl8r.v v16, (a6) # Unknown-size Folded Reload
-; CHECK-NEXT: vsll.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vsll.vv v16, v16, v8, v0.t
; CHECK-NEXT: csrr a6, vlenb
; CHECK-NEXT: slli a6, a6, 3
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
-; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v16, (a6) # Unknown-size Folded Spill
; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: csrr a6, vlenb
; CHECK-NEXT: slli a6, a6, 4
@@ -1198,13 +1215,13 @@ define <vscale x 16 x i64> @fshl_v16i64(<vscale x 16 x i64> %a, <vscale x 16 x i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB47_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a1, v0.t
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
@@ -1318,10 +1335,8 @@ define <vscale x 1 x i8> @fshr_v1i4(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b,
; CHECK-NEXT: li a1, 4
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vremu.vx v10, v10, a1, v0.t
+; CHECK-NEXT: vand.vi v9, v9, 15, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 4, v0.t
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vand.vi v9, v9, 15
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: vand.vi v8, v8, 15, v0.t
@@ -1343,10 +1358,8 @@ define <vscale x 1 x i8> @fshl_v1i4(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b,
; CHECK-NEXT: li a1, 4
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vremu.vx v10, v10, a1, v0.t
+; CHECK-NEXT: vand.vi v9, v9, 15, v0.t
; CHECK-NEXT: vsll.vi v8, v8, 4, v0.t
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
-; CHECK-NEXT: vand.vi v9, v9, 15
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
index 6d8763d34ec1..e34b4a81b631 100644
--- a/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/llrint-vp.ll
@@ -70,8 +70,8 @@ define <vscale x 16 x i64> @llrint_nxv16i64_nxv16f32(<vscale x 16 x float> %x, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB4_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfwcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
index 9fa8807ed4ad..c9f91bf9def2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/lrint-vp.ll
@@ -132,8 +132,8 @@ define <vscale x 16 x iXLen> @lrint_nxv16f32(<vscale x 16 x float> %x, <vscale x
; RV64-i64-NEXT: # %bb.1:
; RV64-i64-NEXT: mv a0, a1
; RV64-i64-NEXT: .LBB4_2:
-; RV64-i64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; RV64-i64-NEXT: vmv1r.v v0, v24
+; RV64-i64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; RV64-i64-NEXT: vfwcvt.x.f.v v24, v8, v0.t
; RV64-i64-NEXT: vmv8r.v v8, v24
; RV64-i64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
index f87fa3ec6f16..3aca3130cc54 100644
--- a/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/masked-tama.ll
@@ -1289,8 +1289,8 @@ define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: viota.m v8, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -1313,8 +1313,8 @@ define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmsbf.m v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
@@ -1444,8 +1444,8 @@ define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmsbf.m v8, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
index e260ae5344e4..be37be06f0e7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1200,13 +1200,13 @@ define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptr
; RV32-LABEL: mgather_nxv16i64:
; RV32: # %bb.0:
; RV32-NEXT: vl8re64.v v24, (a0)
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a2, a0, 3
; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a2
+; RV32-NEXT: vslidedown.vx v7, v0, a2
; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, mu
+; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vluxei32.v v24, (zero), v12, v0.t
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, a1, a0
@@ -1216,20 +1216,35 @@ define void @mgather_nxv16i64(<vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptr
;
; RV64-LABEL: mgather_nxv16i64:
; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: csrr a3, vlenb
+; RV64-NEXT: slli a3, a3, 3
+; RV64-NEXT: sub sp, sp, a3
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; RV64-NEXT: addi a3, sp, 16
+; RV64-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill
+; RV64-NEXT: vmv8r.v v16, v8
; RV64-NEXT: vl8re64.v v24, (a0)
-; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, mu
-; RV64-NEXT: vluxei64.v v24, (zero), v8, v0.t
-; RV64-NEXT: vl8re64.v v8, (a1)
; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: vl8re64.v v8, (a1)
; RV64-NEXT: srli a1, a0, 3
; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a1
+; RV64-NEXT: vslidedown.vx v7, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; RV64-NEXT: vluxei64.v v24, (zero), v16, v0.t
+; RV64-NEXT: vmv1r.v v0, v7
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vluxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: add a0, a2, a0
; RV64-NEXT: vs8r.v v8, (a0)
; RV64-NEXT: vs8r.v v24, (a2)
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
%p0 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> undef, <vscale x 8 x ptr> %ptrs0, i64 0)
%p1 = call <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x ptr> %p0, <vscale x 8 x ptr> %ptrs1, i64 8)
@@ -2116,8 +2131,8 @@ define <vscale x 32 x i8> @mgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8>
; RV64-NEXT: vluxei64.v v15, (a0), v16, v0.t
; RV64-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v10
-; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v8
+; RV64-NEXT: vsetvli zero, zero, e8, m1, ta, mu
; RV64-NEXT: vluxei64.v v14, (a0), v16, v0.t
; RV64-NEXT: vmv4r.v v8, v12
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
index fc8fdf4aaafe..9bfa0f31dc3a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -1691,15 +1691,15 @@ declare <vscale x 16 x ptr> @llvm.vector.insert.nxv8p0.nxv16p0(<vscale x 16 x pt
define void @mscatter_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double> %val1, <vscale x 8 x ptr> %ptrs0, <vscale x 8 x ptr> %ptrs1, <vscale x 16 x i1> %m) {
; RV32-LABEL: mscatter_nxv16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vl4re32.v v24, (a0)
; RV32-NEXT: vl4re32.v v28, (a1)
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (zero), v24, v0.t
+; RV32-NEXT: vl4re32.v v4, (a0)
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: srli a0, a0, 3
; RV32-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a0
+; RV32-NEXT: vslidedown.vx v24, v0, a0
; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (zero), v4, v0.t
+; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: vsoxei32.v v16, (zero), v28, v0.t
; RV32-NEXT: ret
;
@@ -1708,25 +1708,36 @@ define void @mscatter_nxv16f64(<vscale x 8 x double> %val0, <vscale x 8 x double
; RV64-NEXT: addi sp, sp, -16
; RV64-NEXT: .cfi_def_cfa_offset 16
; RV64-NEXT: csrr a2, vlenb
-; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: slli a2, a2, 4
; RV64-NEXT: sub sp, sp, a2
-; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; RV64-NEXT: vl8re64.v v24, (a0)
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV64-NEXT: vl8re64.v v16, (a1)
-; RV64-NEXT: vsoxei64.v v8, (zero), v24, v0.t
+; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT: csrr a2, vlenb
+; RV64-NEXT: slli a2, a2, 3
+; RV64-NEXT: add a2, sp, a2
+; RV64-NEXT: addi a2, a2, 16
+; RV64-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV64-NEXT: vmv8r.v v16, v8
+; RV64-NEXT: vl8re64.v v8, (a1)
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; RV64-NEXT: vl8re64.v v8, (a0)
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: srli a0, a0, 3
; RV64-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a0
+; RV64-NEXT: vslidedown.vx v24, v0, a0
; RV64-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vsoxei64.v v16, (zero), v8, v0.t
+; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: csrr a0, vlenb
+; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: add a0, sp, a0
+; RV64-NEXT: addi a0, a0, 16
; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: addi a0, sp, 16
+; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (zero), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 3
+; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: add sp, sp, a0
; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: ret
@@ -1745,13 +1756,13 @@ define void @mscatter_baseidx_nxv16i8_nxv16f64(<vscale x 8 x double> %val0, <vsc
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v24, v6
; RV32-NEXT: vsll.vi v24, v24, 3
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: vslidedown.vx v7, v0, a1
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
@@ -1763,12 +1774,12 @@ define void @mscatter_baseidx_nxv16i8_nxv16f64(<vscale x 8 x double> %val0, <vsc
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: vsext.vf8 v8, v7
-; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i8> %idxs
@@ -1785,13 +1796,13 @@ define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vs
; RV32-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf2 v24, v4
; RV32-NEXT: vsll.vi v24, v24, 3
-; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
-; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: srli a1, a1, 3
; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: vslidedown.vx v7, v0, a1
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV32-NEXT: vsoxei32.v v8, (a0), v24, v0.t
+; RV32-NEXT: vmv1r.v v0, v7
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
;
@@ -1803,12 +1814,12 @@ define void @mscatter_baseidx_nxv16i16_nxv16f64(<vscale x 8 x double> %val0, <vs
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: vsext.vf4 v8, v6
-; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: srli a1, a1, 3
; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
+; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: vsoxei64.v v16, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i16> %idxs
diff --git a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
index ebe89817630d..a3ea462b6a73 100644
--- a/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/nearbyint-vp.ll
@@ -204,8 +204,8 @@ define <vscale x 4 x half> @vp_nearbyint_nxv4f16(<vscale x 4 x half> %va, <vscal
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: frflags a0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -261,16 +261,16 @@ declare <vscale x 8 x half> @llvm.vp.nearbyint.nxv8f16(<vscale x 8 x half>, <vsc
define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: frflags a0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
@@ -290,8 +290,8 @@ define <vscale x 8 x half> @vp_nearbyint_nxv8f16(<vscale x 8 x half> %va, <vscal
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: frflags a0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -347,16 +347,16 @@ declare <vscale x 16 x half> @llvm.vp.nearbyint.nxv16f16(<vscale x 16 x half>, <
define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: frflags a0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
@@ -376,8 +376,8 @@ define <vscale x 16 x half> @vp_nearbyint_nxv16f16(<vscale x 16 x half> %va, <vs
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: frflags a0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -433,16 +433,16 @@ declare <vscale x 32 x half> @llvm.vp.nearbyint.nxv32f16(<vscale x 32 x half>, <
define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_nearbyint_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: frflags a0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
@@ -458,7 +458,7 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v16, v0
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -468,50 +468,49 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: frflags a2
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: fsflags a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v7, v16, fa5, v0.t
; ZVFHMIN-NEXT: frflags a0
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
-; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: fsflags a0
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -556,20 +555,20 @@ define <vscale x 32 x half> @vp_nearbyint_nxv32f16_unmasked(<vscale x 32 x half>
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: frflags a2
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: fsflags a2
@@ -701,8 +700,8 @@ define <vscale x 4 x float> @vp_nearbyint_nxv4f32(<vscale x 4 x float> %va, <vsc
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -745,8 +744,8 @@ define <vscale x 8 x float> @vp_nearbyint_nxv8f32(<vscale x 8 x float> %va, <vsc
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -789,8 +788,8 @@ define <vscale x 16 x float> @vp_nearbyint_nxv16f32(<vscale x 16 x float> %va, <
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -867,16 +866,16 @@ declare <vscale x 2 x double> @llvm.vp.nearbyint.nxv2f64(<vscale x 2 x double>,
define <vscale x 2 x double> @vp_nearbyint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
@@ -911,16 +910,16 @@ declare <vscale x 4 x double> @llvm.vp.nearbyint.nxv4f64(<vscale x 4 x double>,
define <vscale x 4 x double> @vp_nearbyint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
@@ -955,16 +954,16 @@ declare <vscale x 7 x double> @llvm.vp.nearbyint.nxv7f64(<vscale x 7 x double>,
define <vscale x 7 x double> @vp_nearbyint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -999,16 +998,16 @@ declare <vscale x 8 x double> @llvm.vp.nearbyint.nxv8f64(<vscale x 8 x double>,
define <vscale x 8 x double> @vp_nearbyint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -1044,62 +1043,47 @@ declare <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double
define <vscale x 16 x double> @vp_nearbyint_nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_nearbyint_nxv16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi sp, sp, -16
-; CHECK-NEXT: .cfi_def_cfa_offset 16
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT: vmv1r.v v7, v0
-; CHECK-NEXT: vmv8r.v v24, v16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vfabs.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v6, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: frflags a2
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: vfcvt.x.f.v v16, v24, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: fsflags a2
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v24, v16, v24, v0.t
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v7, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: frflags a0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: fsflags a0
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add sp, sp, a0
-; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.nearbyint.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
ret <vscale x 16 x double> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
index d13d67fd0a88..8bb62eaa8e9e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/pr63596.ll
@@ -27,20 +27,18 @@ define <4 x float> @foo(ptr %0) nounwind {
; CHECK-NEXT: fsw fa0, 0(sp)
; CHECK-NEXT: addi a0, sp, 4
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: addi a0, sp, 12
; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v9, v8, 1
-; CHECK-NEXT: addi a0, sp, 8
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: addi a0, sp, 12
; CHECK-NEXT: vle32.v v10, (a0)
+; CHECK-NEXT: addi a0, sp, 8
+; CHECK-NEXT: vle32.v v11, (a0)
; CHECK-NEXT: mv a0, sp
; CHECK-NEXT: vle32.v v8, (a0)
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v10, 1
+; CHECK-NEXT: vslideup.vi v10, v9, 1
+; CHECK-NEXT: vslideup.vi v8, v11, 1
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
-; CHECK-NEXT: vslideup.vi v8, v9, 2
+; CHECK-NEXT: vslideup.vi v8, v10, 2
; CHECK-NEXT: ld ra, 40(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s0, 32(sp) # 8-byte Folded Reload
; CHECK-NEXT: ld s1, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
index f934127f978d..88bd92c6ec16 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rint-vp.ll
@@ -185,8 +185,8 @@ define <vscale x 4 x half> @vp_rint_nxv4f16(<vscale x 4 x half> %va, <vscale x 4
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -237,15 +237,15 @@ declare <vscale x 8 x half> @llvm.vp.rint.nxv8f16(<vscale x 8 x half>, <vscale x
define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
@@ -263,8 +263,8 @@ define <vscale x 8 x half> @vp_rint_nxv8f16(<vscale x 8 x half> %va, <vscale x 8
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -315,15 +315,15 @@ declare <vscale x 16 x half> @llvm.vp.rint.nxv16f16(<vscale x 16 x half>, <vscal
define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
@@ -341,8 +341,8 @@ define <vscale x 16 x half> @vp_rint_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: fmv.w.x fa5, a0
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -393,15 +393,15 @@ declare <vscale x 32 x half> @llvm.vp.rint.nxv32f16(<vscale x 32 x half>, <vscal
define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_rint_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
@@ -426,46 +426,50 @@ define <vscale x 32 x half> @vp_rint_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vmv1r.v v8, v16
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v8, v16, fa5, v0.t
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
-; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -508,19 +512,19 @@ define <vscale x 32 x half> @vp_rint_nxv32f16_unmasked(<vscale x 32 x half> %va,
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -640,8 +644,8 @@ define <vscale x 4 x float> @vp_rint_nxv4f32(<vscale x 4 x float> %va, <vscale x
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
@@ -680,8 +684,8 @@ define <vscale x 8 x float> @vp_rint_nxv8f32(<vscale x 8 x float> %va, <vscale x
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
@@ -720,8 +724,8 @@ define <vscale x 16 x float> @vp_rint_nxv16f32(<vscale x 16 x float> %va, <vscal
; CHECK-NEXT: fmv.w.x fa5, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
@@ -791,15 +795,15 @@ declare <vscale x 2 x double> @llvm.vp.rint.nxv2f64(<vscale x 2 x double>, <vsca
define <vscale x 2 x double> @vp_rint_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
@@ -831,15 +835,15 @@ declare <vscale x 4 x double> @llvm.vp.rint.nxv4f64(<vscale x 4 x double>, <vsca
define <vscale x 4 x double> @vp_rint_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
@@ -871,15 +875,15 @@ declare <vscale x 7 x double> @llvm.vp.rint.nxv7f64(<vscale x 7 x double>, <vsca
define <vscale x 7 x double> @vp_rint_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -911,15 +915,15 @@ declare <vscale x 8 x double> @llvm.vp.rint.nxv8f64(<vscale x 8 x double>, <vsca
define <vscale x 8 x double> @vp_rint_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_rint_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
@@ -955,62 +959,51 @@ define <vscale x 16 x double> @vp_rint_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
index edeac1acf3b0..1ddadcc49373 100644
--- a/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/round-vp.ll
@@ -204,8 +204,8 @@ define <vscale x 4 x half> @vp_round_nxv4f16(<vscale x 4 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <vscale x 8 x half> @llvm.vp.round.nxv8f16(<vscale x 8 x half>, <vscale
define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <vscale x 8 x half> @vp_round_nxv8f16(<vscale x 8 x half> %va, <vscale x
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -347,16 +347,16 @@ declare <vscale x 16 x half> @llvm.vp.round.nxv16f16(<vscale x 16 x half>, <vsca
define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
-; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -376,8 +376,8 @@ define <vscale x 16 x half> @vp_round_nxv16f16(<vscale x 16 x half> %va, <vscale
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -433,16 +433,16 @@ declare <vscale x 32 x half> @llvm.vp.round.nxv32f16(<vscale x 32 x half>, <vsca
define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_round_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 4
-; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -458,7 +458,6 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -466,52 +465,60 @@ define <vscale x 32 x half> @vp_round_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vmv1r.v v8, v16
; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v8, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 4
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fsrm a0
-; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -556,20 +563,20 @@ define <vscale x 32 x half> @vp_round_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -701,8 +708,8 @@ define <vscale x 4 x float> @vp_round_nxv4f32(<vscale x 4 x float> %va, <vscale
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -745,8 +752,8 @@ define <vscale x 8 x float> @vp_round_nxv8f32(<vscale x 8 x float> %va, <vscale
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -789,8 +796,8 @@ define <vscale x 16 x float> @vp_round_nxv16f32(<vscale x 16 x float> %va, <vsca
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -867,16 +874,16 @@ declare <vscale x 2 x double> @llvm.vp.round.nxv2f64(<vscale x 2 x double>, <vsc
define <vscale x 2 x double> @vp_round_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -911,16 +918,16 @@ declare <vscale x 4 x double> @llvm.vp.round.nxv4f64(<vscale x 4 x double>, <vsc
define <vscale x 4 x double> @vp_round_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -955,16 +962,16 @@ declare <vscale x 7 x double> @llvm.vp.round.nxv7f64(<vscale x 7 x double>, <vsc
define <vscale x 7 x double> @vp_round_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -999,16 +1006,16 @@ declare <vscale x 8 x double> @llvm.vp.round.nxv8f64(<vscale x 8 x double>, <vsc
define <vscale x 8 x double> @vp_round_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_round_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -1047,66 +1054,56 @@ define <vscale x 16 x double> @vp_round_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 4
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 4
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
index a77c58ba9ec5..8c5a7bb2dea6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundeven-vp.ll
@@ -204,8 +204,8 @@ define <vscale x 4 x half> @vp_roundeven_nxv4f16(<vscale x 4 x half> %va, <vscal
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <vscale x 8 x half> @llvm.vp.roundeven.nxv8f16(<vscale x 8 x half>, <vsc
define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <vscale x 8 x half> @vp_roundeven_nxv8f16(<vscale x 8 x half> %va, <vscal
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -347,16 +347,16 @@ declare <vscale x 16 x half> @llvm.vp.roundeven.nxv16f16(<vscale x 16 x half>, <
define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -376,8 +376,8 @@ define <vscale x 16 x half> @vp_roundeven_nxv16f16(<vscale x 16 x half> %va, <vs
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -433,16 +433,16 @@ declare <vscale x 32 x half> @llvm.vp.roundeven.nxv32f16(<vscale x 32 x half>, <
define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundeven_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 0
-; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -458,7 +458,6 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -466,52 +465,60 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16(<vscale x 32 x half> %va, <vs
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vmv1r.v v8, v16
; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v8, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 0
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fsrm a0
-; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -556,20 +563,20 @@ define <vscale x 32 x half> @vp_roundeven_nxv32f16_unmasked(<vscale x 32 x half>
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -701,8 +708,8 @@ define <vscale x 4 x float> @vp_roundeven_nxv4f32(<vscale x 4 x float> %va, <vsc
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -745,8 +752,8 @@ define <vscale x 8 x float> @vp_roundeven_nxv8f32(<vscale x 8 x float> %va, <vsc
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -789,8 +796,8 @@ define <vscale x 16 x float> @vp_roundeven_nxv16f32(<vscale x 16 x float> %va, <
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -867,16 +874,16 @@ declare <vscale x 2 x double> @llvm.vp.roundeven.nxv2f64(<vscale x 2 x double>,
define <vscale x 2 x double> @vp_roundeven_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -911,16 +918,16 @@ declare <vscale x 4 x double> @llvm.vp.roundeven.nxv4f64(<vscale x 4 x double>,
define <vscale x 4 x double> @vp_roundeven_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -955,16 +962,16 @@ declare <vscale x 7 x double> @llvm.vp.roundeven.nxv7f64(<vscale x 7 x double>,
define <vscale x 7 x double> @vp_roundeven_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -999,16 +1006,16 @@ declare <vscale x 8 x double> @llvm.vp.roundeven.nxv8f64(<vscale x 8 x double>,
define <vscale x 8 x double> @vp_roundeven_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundeven_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -1047,66 +1054,56 @@ define <vscale x 16 x double> @vp_roundeven_nxv16f64(<vscale x 16 x double> %va,
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 0
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 0
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
index 71a53c525551..1227e73a0243 100644
--- a/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/roundtozero-vp.ll
@@ -204,8 +204,8 @@ define <vscale x 4 x half> @vp_roundtozero_nxv4f16(<vscale x 4 x half> %va, <vsc
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v9, v12, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v9
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v12, v10, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -261,16 +261,16 @@ declare <vscale x 8 x half> @llvm.vp.roundtozero.nxv8f16(<vscale x 8 x half>, <v
define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv8f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI6_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI6_0)(a1)
+; ZVFH-NEXT: vmv1r.v v10, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; ZVFH-NEXT: vfabs.v v12, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; ZVFH-NEXT: vmflt.vf v10, v12, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
-; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v10
+; ZVFH-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v12, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -290,8 +290,8 @@ define <vscale x 8 x half> @vp_roundtozero_nxv8f16(<vscale x 8 x half> %va, <vsc
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v10, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v10
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v12, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -347,16 +347,16 @@ declare <vscale x 16 x half> @llvm.vp.roundtozero.nxv16f16(<vscale x 16 x half>,
define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv16f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI8_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI8_0)(a1)
+; ZVFH-NEXT: vmv1r.v v12, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFH-NEXT: vfabs.v v16, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, mu
; ZVFH-NEXT: vmflt.vf v12, v16, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
-; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v12
+; ZVFH-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v16, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -376,8 +376,8 @@ define <vscale x 16 x half> @vp_roundtozero_nxv16f16(<vscale x 16 x half> %va, <
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v12, v24, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v24, v16, v0.t
; ZVFHMIN-NEXT: fsrm a0
; ZVFHMIN-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -433,16 +433,16 @@ declare <vscale x 32 x half> @llvm.vp.roundtozero.nxv32f16(<vscale x 32 x half>,
define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vp_roundtozero_nxv32f16:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: lui a1, %hi(.LCPI10_0)
; ZVFH-NEXT: flh fa5, %lo(.LCPI10_0)(a1)
+; ZVFH-NEXT: vmv1r.v v16, v0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfabs.v v24, v8, v0.t
; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, mu
; ZVFH-NEXT: vmflt.vf v16, v24, fa5, v0.t
; ZVFH-NEXT: fsrmi a0, 1
-; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v16
+; ZVFH-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; ZVFH-NEXT: vfcvt.x.f.v v24, v8, v0.t
; ZVFH-NEXT: fsrm a0
; ZVFH-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -458,7 +458,6 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
; ZVFHMIN-NEXT: slli a1, a1, 3
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -466,52 +465,60 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16(<vscale x 32 x half> %va, <
; ZVFHMIN-NEXT: addi a4, a4, -1
; ZVFHMIN-NEXT: and a3, a4, a3
; ZVFHMIN-NEXT: srli a2, a2, 2
+; ZVFHMIN-NEXT: vmv1r.v v16, v0
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v17, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v17, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v17
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: bltu a0, a1, .LBB10_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: addi a1, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vl8r.v v0, (a1) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vmv1r.v v8, v16
; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
+; ZVFHMIN-NEXT: vmflt.vf v8, v16, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a0, 1
+; ZVFHMIN-NEXT: vmv1r.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
+; ZVFHMIN-NEXT: vfcvt.x.f.v v16, v24, v0.t
+; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fsrm a0
-; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfcvt.f.x.v v16, v16, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
-; ZVFHMIN-NEXT: vfsgnj.vv v24, v8, v24, v0.t
+; ZVFHMIN-NEXT: vfsgnj.vv v24, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -556,20 +563,20 @@ define <vscale x 32 x half> @vp_roundtozero_nxv32f16_unmasked(<vscale x 32 x hal
; ZVFHMIN-NEXT: vmset.m v16
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v16, v16, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v8, v24, v0.t
; ZVFHMIN-NEXT: lui a2, 307200
; ZVFHMIN-NEXT: fmv.w.x fa5, a2
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; ZVFHMIN-NEXT: vmflt.vf v16, v8, fa5, v0.t
; ZVFHMIN-NEXT: fsrmi a2, 1
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.x.f.v v8, v24, v0.t
; ZVFHMIN-NEXT: fsrm a2
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
@@ -701,8 +708,8 @@ define <vscale x 4 x float> @vp_roundtozero_nxv4f32(<vscale x 4 x float> %va, <v
; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -745,8 +752,8 @@ define <vscale x 8 x float> @vp_roundtozero_nxv8f32(<vscale x 8 x float> %va, <v
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -789,8 +796,8 @@ define <vscale x 16 x float> @vp_roundtozero_nxv16f32(<vscale x 16 x float> %va,
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -867,16 +874,16 @@ declare <vscale x 2 x double> @llvm.vp.roundtozero.nxv2f64(<vscale x 2 x double>
define <vscale x 2 x double> @vp_roundtozero_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: lui a1, %hi(.LCPI24_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI24_0)(a1)
+; CHECK-NEXT: vmv1r.v v10, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfabs.v v12, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v12, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v12, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v12, v12, v0.t
@@ -911,16 +918,16 @@ declare <vscale x 4 x double> @llvm.vp.roundtozero.nxv4f64(<vscale x 4 x double>
define <vscale x 4 x double> @vp_roundtozero_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: lui a1, %hi(.LCPI26_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI26_0)(a1)
+; CHECK-NEXT: vmv1r.v v12, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfabs.v v16, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v12, v16, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
@@ -955,16 +962,16 @@ declare <vscale x 7 x double> @llvm.vp.roundtozero.nxv7f64(<vscale x 7 x double>
define <vscale x 7 x double> @vp_roundtozero_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI28_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI28_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -999,16 +1006,16 @@ declare <vscale x 8 x double> @llvm.vp.roundtozero.nxv8f64(<vscale x 8 x double>
define <vscale x 8 x double> @vp_roundtozero_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_roundtozero_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: lui a1, %hi(.LCPI30_0)
; CHECK-NEXT: fld fa5, %lo(.LCPI30_0)(a1)
+; CHECK-NEXT: vmv1r.v v16, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
; CHECK-NEXT: vmflt.vf v16, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
-; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
@@ -1047,66 +1054,56 @@ define <vscale x 16 x double> @vp_roundtozero_nxv16f64(<vscale x 16 x double> %v
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: addi a1, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 3
; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v25, v0, a2
+; CHECK-NEXT: vslidedown.vx v6, v0, a2
; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
+; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
-; CHECK-NEXT: lui a3, %hi(.LCPI32_0)
-; CHECK-NEXT: fld fa5, %lo(.LCPI32_0)(a3)
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfabs.v v8, v16, v0.t
+; CHECK-NEXT: vfabs.v v24, v16, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v25, v8, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v6, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a2, 1
+; CHECK-NEXT: vmv1r.v v0, v6
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v25
-; CHECK-NEXT: vfcvt.x.f.v v8, v16, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v16, v0.t
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a3) # Unknown-size Folded Spill
; CHECK-NEXT: fsrm a2
-; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v16, v8, v16, v0.t
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: add a2, sp, a2
-; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB32_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vfabs.v v16, v8, v0.t
+; CHECK-NEXT: vfabs.v v24, v8, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vmflt.vf v24, v16, fa5, v0.t
+; CHECK-NEXT: vmflt.vf v7, v24, fa5, v0.t
; CHECK-NEXT: fsrmi a0, 1
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfcvt.x.f.v v16, v8, v0.t
+; CHECK-NEXT: vfcvt.x.f.v v24, v8, v0.t
; CHECK-NEXT: fsrm a0
-; CHECK-NEXT: vfcvt.f.x.v v16, v16, v0.t
+; CHECK-NEXT: vfcvt.f.x.v v24, v24, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, mu
-; CHECK-NEXT: vfsgnj.vv v8, v16, v8, v0.t
+; CHECK-NEXT: vfsgnj.vv v8, v24, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
index e73415ac0085..8210ea22a6ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-vector-csr.ll
@@ -67,13 +67,13 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-NEXT: addi a0, a0, %lo(.L.str)
; SPILL-O2-NEXT: call puts
-; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: add a0, sp, a0
; SPILL-O2-NEXT: addi a0, a0, 16
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-NEXT: vfadd.vv v8, v9, v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
index 483f689cf633..352362908898 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-vector-csr.ll
@@ -70,13 +70,13 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-NEXT: addi a0, a0, %lo(.L.str)
; SPILL-O2-NEXT: call puts
-; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: add a0, sp, a0
; SPILL-O2-NEXT: addi a0, a0, 16
; SPILL-O2-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-NEXT: addi a0, sp, 16
; SPILL-O2-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; SPILL-O2-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-NEXT: vfadd.vv v8, v9, v8
; SPILL-O2-NEXT: csrr a0, vlenb
; SPILL-O2-NEXT: slli a0, a0, 1
@@ -102,11 +102,11 @@ define <vscale x 1 x double> @foo(<vscale x 1 x double> %a, <vscale x 1 x double
; SPILL-O2-VLEN128-NEXT: lui a0, %hi(.L.str)
; SPILL-O2-VLEN128-NEXT: addi a0, a0, %lo(.L.str)
; SPILL-O2-VLEN128-NEXT: call puts
-; SPILL-O2-VLEN128-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 32
; SPILL-O2-VLEN128-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; SPILL-O2-VLEN128-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; SPILL-O2-VLEN128-NEXT: vfadd.vv v8, v9, v8
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
index 743016a7cbcd..b9ede8d68e3c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rvv-peephole-vmerge-vops.ll
@@ -893,8 +893,8 @@ define void @test_dag_loop() {
; CHECK-LABEL: test_dag_loop:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmclr.m v0
+; CHECK-NEXT: vmv.v.i v8, 0
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vsetivli zero, 0, e8, m4, tu, mu
; CHECK-NEXT: vssubu.vx v12, v8, zero, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
index cc967396153b..3dba88136306 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-fp-vp.ll
@@ -2212,19 +2212,18 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: sub a4, a2, a3
; ZVFH-NEXT: sltu a5, a2, a4
; ZVFH-NEXT: addi a5, a5, -1
-; ZVFH-NEXT: and a4, a5, a4
-; ZVFH-NEXT: vsetvli a5, zero, e8, m1, ta, ma
; ZVFH-NEXT: vl8re16.v v0, (a0)
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; ZVFH-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; ZVFH-NEXT: vslidedown.vx v0, v24, a1
+; ZVFH-NEXT: and a4, a5, a4
; ZVFH-NEXT: vsetvli zero, a4, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v7, v16, v8, v0.t
; ZVFH-NEXT: bltu a2, a3, .LBB85_2
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a2, a3
; ZVFH-NEXT: .LBB85_2:
-; ZVFH-NEXT: vsetvli zero, a2, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v24
; ZVFH-NEXT: csrr a0, vlenb
; ZVFH-NEXT: slli a0, a0, 3
@@ -2233,6 +2232,7 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFH-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFH-NEXT: addi a0, sp, 16
; ZVFH-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFH-NEXT: vsetvli zero, a2, e16, m8, ta, ma
; ZVFH-NEXT: vmfeq.vv v16, v8, v24, v0.t
; ZVFH-NEXT: add a0, a1, a1
; ZVFH-NEXT: vsetvli zero, a0, e8, m1, ta, ma
@@ -2249,133 +2249,152 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 5
+; ZVFHMIN-NEXT: li a3, 34
+; ZVFHMIN-NEXT: mul a1, a1, a3
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 4
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x22, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 34 * vlenb
+; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: li a3, 18
+; ZVFHMIN-NEXT: mul a1, a1, a3
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a3, vlenb
-; ZVFHMIN-NEXT: srli a1, a3, 1
-; ZVFHMIN-NEXT: vsetvli a4, zero, e8, m1, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v17, v0
-; ZVFHMIN-NEXT: vslidedown.vx v18, v0, a1
-; ZVFHMIN-NEXT: slli a4, a3, 3
-; ZVFHMIN-NEXT: add a4, a0, a4
-; ZVFHMIN-NEXT: vl8re16.v v0, (a4)
+; ZVFHMIN-NEXT: slli a1, a3, 3
+; ZVFHMIN-NEXT: add a1, a0, a1
+; ZVFHMIN-NEXT: vl8re16.v v16, (a1)
; ZVFHMIN-NEXT: slli a5, a3, 2
-; ZVFHMIN-NEXT: sub a4, a2, a5
-; ZVFHMIN-NEXT: sltu a6, a2, a4
-; ZVFHMIN-NEXT: addi a6, a6, -1
-; ZVFHMIN-NEXT: and a6, a6, a4
+; ZVFHMIN-NEXT: sub a1, a2, a5
+; ZVFHMIN-NEXT: sltu a4, a2, a1
+; ZVFHMIN-NEXT: addi a4, a4, -1
+; ZVFHMIN-NEXT: and a6, a4, a1
; ZVFHMIN-NEXT: slli a4, a3, 1
-; ZVFHMIN-NEXT: sub a7, a6, a4
-; ZVFHMIN-NEXT: sltu t0, a6, a7
-; ZVFHMIN-NEXT: addi t0, t0, -1
-; ZVFHMIN-NEXT: and a7, t0, a7
+; ZVFHMIN-NEXT: sub a1, a6, a4
+; ZVFHMIN-NEXT: sltu a7, a6, a1
+; ZVFHMIN-NEXT: addi a7, a7, -1
+; ZVFHMIN-NEXT: and a7, a7, a1
+; ZVFHMIN-NEXT: srli a1, a3, 1
+; ZVFHMIN-NEXT: csrr t0, vlenb
+; ZVFHMIN-NEXT: add t0, sp, t0
+; ZVFHMIN-NEXT: addi t0, t0, 16
+; ZVFHMIN-NEXT: vs1r.v v0, (t0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli t0, zero, e8, m1, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v8, v0, a1
; ZVFHMIN-NEXT: srli a3, a3, 2
+; ZVFHMIN-NEXT: addi t0, sp, 16
+; ZVFHMIN-NEXT: vs1r.v v8, (t0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli t0, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslidedown.vx v16, v18, a3
-; ZVFHMIN-NEXT: vsetvli t0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3
; ZVFHMIN-NEXT: vl8re16.v v8, (a0)
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li t0, 24
+; ZVFHMIN-NEXT: li t0, 26
; ZVFHMIN-NEXT: mul a0, a0, t0
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: slli a0, a0, 1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: vmv4r.v v16, v24
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li t0, 10
+; ZVFHMIN-NEXT: mul a0, a0, t0
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
; ZVFHMIN-NEXT: vsetvli zero, a7, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v16
-; ZVFHMIN-NEXT: vmfeq.vv v20, v8, v24, v0.t
+; ZVFHMIN-NEXT: vmfeq.vv v26, v16, v8, v0.t
; ZVFHMIN-NEXT: bltu a6, a4, .LBB85_2
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a6, a4
; ZVFHMIN-NEXT: .LBB85_2:
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: li a7, 10
+; ZVFHMIN-NEXT: mul a0, a0, a7
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl1r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, a6, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v18
-; ZVFHMIN-NEXT: vmfeq.vv v6, v24, v8, v0.t
+; ZVFHMIN-NEXT: vmfeq.vv v6, v16, v8, v0.t
; ZVFHMIN-NEXT: add a0, a3, a3
; ZVFHMIN-NEXT: bltu a2, a5, .LBB85_4
; ZVFHMIN-NEXT: # %bb.3:
; ZVFHMIN-NEXT: mv a2, a5
; ZVFHMIN-NEXT: .LBB85_4:
; ZVFHMIN-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vslideup.vx v6, v20, a3
+; ZVFHMIN-NEXT: vslideup.vx v6, v26, a3
; ZVFHMIN-NEXT: sub a5, a2, a4
; ZVFHMIN-NEXT: sltu a6, a2, a5
; ZVFHMIN-NEXT: addi a6, a6, -1
; ZVFHMIN-NEXT: and a5, a6, a5
+; ZVFHMIN-NEXT: csrr a6, vlenb
+; ZVFHMIN-NEXT: add a6, sp, a6
+; ZVFHMIN-NEXT: addi a6, a6, 16
+; ZVFHMIN-NEXT: vl1r.v v8, (a6) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vmv1r.v v7, v8
; ZVFHMIN-NEXT: vsetvli a6, zero, e8, mf2, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v7, v17
-; ZVFHMIN-NEXT: vslidedown.vx v0, v17, a3
-; ZVFHMIN-NEXT: vsetvli a6, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vslidedown.vx v0, v8, a3
; ZVFHMIN-NEXT: csrr a6, vlenb
-; ZVFHMIN-NEXT: slli a6, a6, 3
+; ZVFHMIN-NEXT: li a7, 18
+; ZVFHMIN-NEXT: mul a6, a6, a7
; ZVFHMIN-NEXT: add a6, sp, a6
; ZVFHMIN-NEXT: addi a6, a6, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a6) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: vl8r.v v24, (a6) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli a6, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
; ZVFHMIN-NEXT: csrr a6, vlenb
-; ZVFHMIN-NEXT: slli a6, a6, 4
+; ZVFHMIN-NEXT: li a7, 10
+; ZVFHMIN-NEXT: mul a6, a6, a7
; ZVFHMIN-NEXT: add a6, sp, a6
; ZVFHMIN-NEXT: addi a6, a6, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a6, vlenb
-; ZVFHMIN-NEXT: li a7, 24
+; ZVFHMIN-NEXT: li a7, 26
; ZVFHMIN-NEXT: mul a6, a6, a7
; ZVFHMIN-NEXT: add a6, sp, a6
; ZVFHMIN-NEXT: addi a6, a6, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a6) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
+; ZVFHMIN-NEXT: vl8r.v v16, (a6) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: csrr a6, vlenb
+; ZVFHMIN-NEXT: li a7, 10
+; ZVFHMIN-NEXT: mul a6, a6, a7
+; ZVFHMIN-NEXT: add a6, sp, a6
+; ZVFHMIN-NEXT: addi a6, a6, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a6) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a5, vlenb
-; ZVFHMIN-NEXT: slli a5, a5, 4
-; ZVFHMIN-NEXT: add a5, sp, a5
-; ZVFHMIN-NEXT: addi a5, a5, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vmfeq.vv v5, v24, v8, v0.t
+; ZVFHMIN-NEXT: vmfeq.vv v5, v16, v8, v0.t
; ZVFHMIN-NEXT: bltu a2, a4, .LBB85_6
; ZVFHMIN-NEXT: # %bb.5:
; ZVFHMIN-NEXT: mv a2, a4
; ZVFHMIN-NEXT: .LBB85_6:
; ZVFHMIN-NEXT: vsetvli a4, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: csrr a4, vlenb
-; ZVFHMIN-NEXT: li a5, 24
+; ZVFHMIN-NEXT: li a5, 26
; ZVFHMIN-NEXT: mul a4, a4, a5
; ZVFHMIN-NEXT: add a4, sp, a4
; ZVFHMIN-NEXT: addi a4, a4, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a4) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vmv1r.v v0, v7
-; ZVFHMIN-NEXT: vmfeq.vv v8, v24, v16, v0.t
+; ZVFHMIN-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vmfeq.vv v8, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslideup.vx v8, v5, a3
; ZVFHMIN-NEXT: add a0, a1, a1
@@ -2383,7 +2402,8 @@ define <vscale x 64 x i1> @fcmp_oeq_vv_nxv64f16(<vscale x 64 x half> %va, <vscal
; ZVFHMIN-NEXT: vslideup.vx v8, v6, a1
; ZVFHMIN-NEXT: vmv.v.v v0, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 34
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
@@ -3474,130 +3494,154 @@ define <vscale x 32 x i1> @fcmp_oeq_vv_nxv32f64(<vscale x 32 x double> %va, <vsc
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: li a3, 48
; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: li a3, 40
+; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: li a1, 24
-; CHECK-NEXT: mul t2, a3, a1
-; CHECK-NEXT: slli a7, a3, 3
-; CHECK-NEXT: srli a4, a3, 2
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v7, v0, a4
-; CHECK-NEXT: srli a1, a3, 3
-; CHECK-NEXT: slli t0, a3, 4
-; CHECK-NEXT: add a5, a2, a7
-; CHECK-NEXT: vl8re64.v v16, (a5)
-; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
-; CHECK-NEXT: slli a5, a3, 1
-; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: mv t1, a6
+; CHECK-NEXT: mul t0, a4, a1
+; CHECK-NEXT: slli t1, a4, 3
+; CHECK-NEXT: srli a1, a4, 2
+; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v7, v0, a1
+; CHECK-NEXT: srli a3, a4, 3
+; CHECK-NEXT: add a5, a2, t1
+; CHECK-NEXT: vl8re64.v v8, (a5)
+; CHECK-NEXT: slli t3, a4, 4
+; CHECK-NEXT: slli a5, a4, 1
+; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a3
+; CHECK-NEXT: mv a7, a6
; CHECK-NEXT: bltu a6, a5, .LBB171_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv t1, a5
+; CHECK-NEXT: mv a7, a5
; CHECK-NEXT: .LBB171_2:
-; CHECK-NEXT: add t2, a2, t2
-; CHECK-NEXT: add a7, a0, a7
-; CHECK-NEXT: add t0, a2, t0
-; CHECK-NEXT: vl8re64.v v8, (a2)
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: sub a2, t1, a3
-; CHECK-NEXT: sltu t3, t1, a2
-; CHECK-NEXT: addi t3, t3, -1
-; CHECK-NEXT: and a2, t3, a2
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: add t2, a2, t0
+; CHECK-NEXT: add t1, a0, t1
+; CHECK-NEXT: add t0, a2, t3
+; CHECK-NEXT: vl8re64.v v16, (a2)
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: slli a2, a2, 4
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v6, v8, v16, v0.t
-; CHECK-NEXT: bltu t1, a3, .LBB171_4
+; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: sub a2, a7, a4
+; CHECK-NEXT: sltu t3, a7, a2
+; CHECK-NEXT: addi t3, t3, -1
+; CHECK-NEXT: and a2, t3, a2
+; CHECK-NEXT: csrr t3, vlenb
+; CHECK-NEXT: slli t3, t3, 5
+; CHECK-NEXT: add t3, sp, t3
+; CHECK-NEXT: addi t3, t3, 16
+; CHECK-NEXT: vl8r.v v16, (t3) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-NEXT: vmfeq.vv v6, v16, v8, v0.t
+; CHECK-NEXT: bltu a7, a4, .LBB171_4
; CHECK-NEXT: # %bb.3:
-; CHECK-NEXT: mv t1, a3
+; CHECK-NEXT: mv a7, a4
; CHECK-NEXT: .LBB171_4:
-; CHECK-NEXT: vl8re64.v v16, (t2)
+; CHECK-NEXT: vl8re64.v v8, (t2)
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: slli a2, a2, 5
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v18, v7, a1
-; CHECK-NEXT: vsetvli zero, t1, e64, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v8, (t1)
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: li t1, 24
+; CHECK-NEXT: mul a2, a2, t1
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
-; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v17, v24, v8, v0.t
-; CHECK-NEXT: vl8re64.v v8, (a7)
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v18, v7, a3
+; CHECK-NEXT: vl8re64.v v8, (t0)
; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
; CHECK-NEXT: addi a2, a2, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
-; CHECK-NEXT: vl8re64.v v8, (t0)
-; CHECK-NEXT: add a2, a1, a1
+; CHECK-NEXT: vl8re64.v v8, (a0)
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a2, 40
+; CHECK-NEXT: mul a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
+; CHECK-NEXT: vmfeq.vv v17, v24, v8, v0.t
+; CHECK-NEXT: add a2, a3, a3
+; CHECK-NEXT: sub a0, a6, a5
+; CHECK-NEXT: sltu a5, a6, a0
+; CHECK-NEXT: addi a5, a5, -1
+; CHECK-NEXT: and a0, a5, a0
; CHECK-NEXT: vsetvli zero, a2, e8, mf2, tu, ma
-; CHECK-NEXT: sub a2, a6, a5
-; CHECK-NEXT: sltu a5, a6, a2
-; CHECK-NEXT: vl8re64.v v24, (a0)
-; CHECK-NEXT: addi a0, a5, -1
-; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: vslideup.vx v17, v6, a1
+; CHECK-NEXT: vslideup.vx v17, v6, a3
; CHECK-NEXT: mv a2, a0
-; CHECK-NEXT: bltu a0, a3, .LBB171_6
+; CHECK-NEXT: bltu a0, a4, .LBB171_6
; CHECK-NEXT: # %bb.5:
-; CHECK-NEXT: mv a2, a3
+; CHECK-NEXT: mv a2, a4
; CHECK-NEXT: .LBB171_6:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: csrr a5, vlenb
+; CHECK-NEXT: slli a5, a5, 3
+; CHECK-NEXT: add a5, sp, a5
+; CHECK-NEXT: addi a5, a5, 16
+; CHECK-NEXT: vl8r.v v8, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a5, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a5) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmfeq.vv v16, v24, v8, v0.t
-; CHECK-NEXT: add a2, a4, a1
-; CHECK-NEXT: vsetvli zero, a2, e8, mf2, tu, ma
-; CHECK-NEXT: vslideup.vx v17, v16, a4
-; CHECK-NEXT: sub a2, a0, a3
+; CHECK-NEXT: sub a2, a0, a4
; CHECK-NEXT: sltu a0, a0, a2
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: and a0, a0, a2
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v18
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmfeq.vv v16, v8, v24, v0.t
-; CHECK-NEXT: slli a0, a1, 1
-; CHECK-NEXT: add a0, a0, a1
-; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT: vslideup.vx v17, v16, a0
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 5
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a4, 24
+; CHECK-NEXT: mul a2, a2, a4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vmfeq.vv v18, v8, v24, v0.t
+; CHECK-NEXT: add a0, a1, a3
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT: vslideup.vx v17, v16, a1
+; CHECK-NEXT: slli a0, a3, 1
+; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: add a3, a0, a3
+; CHECK-NEXT: vsetvli zero, a3, e8, mf2, ta, ma
+; CHECK-NEXT: vslideup.vx v17, v18, a0
; CHECK-NEXT: vmv1r.v v0, v17
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: li a1, 48
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index 85f5ffd784e9..eb8c58d2d377 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -1106,10 +1106,10 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: vlm.v v0, (a2)
; CHECK-NEXT: sub a2, a3, a1
; CHECK-NEXT: sltu a4, a3, a2
-; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: vl8r.v v24, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a2, a4, a2
; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vv v6, v16, v8, v0.t
@@ -1117,7 +1117,6 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB96_2:
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -1126,6 +1125,7 @@ define <vscale x 128 x i1> @icmp_eq_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v6
@@ -1156,8 +1156,8 @@ define <vscale x 128 x i1> @icmp_eq_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB97_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v25
@@ -1186,8 +1186,8 @@ define <vscale x 128 x i1> @icmp_eq_vx_swap_nxv128i8(<vscale x 128 x i8> %va, i8
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB98_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v16
; CHECK-NEXT: vmv1r.v v8, v25
@@ -2257,19 +2257,18 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: sub a4, a2, a3
; CHECK-NEXT: sltu a5, a2, a4
; CHECK-NEXT: addi a5, a5, -1
-; CHECK-NEXT: and a4, a5, a4
-; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
; CHECK-NEXT: vl8re32.v v0, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v24, a1
+; CHECK-NEXT: and a4, a5, a4
; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vv v7, v16, v8, v0.t
; CHECK-NEXT: bltu a2, a3, .LBB189_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB189_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -2278,6 +2277,7 @@ define <vscale x 32 x i1> @icmp_eq_vv_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vv v16, v8, v24, v0.t
; CHECK-NEXT: add a0, a1, a1
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -2311,8 +2311,8 @@ define <vscale x 32 x i1> @icmp_eq_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: .LBB190_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: add a0, a2, a2
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
@@ -2344,8 +2344,8 @@ define <vscale x 32 x i1> @icmp_eq_vx_swap_nxv32i32(<vscale x 32 x i32> %va, i32
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a3
; CHECK-NEXT: .LBB191_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmseq.vx v16, v8, a0, v0.t
; CHECK-NEXT: add a0, a2, a2
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
index ab7da9e0faf2..6e327457bebf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/shuffle-reverse.ll
@@ -49,8 +49,8 @@ define <8 x i8> @v4i8_2(<4 x i8> %a, <4 x i8> %b) {
; CHECK-NEXT: vid.v v11
; CHECK-NEXT: vrsub.vi v12, v11, 7
; CHECK-NEXT: vrgather.vv v10, v8, v12
-; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv1r.v v8, v10
; CHECK-NEXT: ret
@@ -78,11 +78,11 @@ define <16 x i8> @v8i8_2(<8 x i8> %a, <8 x i8> %b) {
; CHECK-NEXT: vid.v v11
; CHECK-NEXT: vrsub.vi v12, v11, 15
; CHECK-NEXT: vrgather.vv v10, v8, v12
-; CHECK-NEXT: vrsub.vi v8, v11, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vrsub.vi v8, v11, 7
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -174,8 +174,8 @@ define <8 x i16> @v4i16_2(<4 x i16> %a, <4 x i16> %b) {
; CHECK-NEXT: vid.v v11
; CHECK-NEXT: vrsub.vi v12, v11, 7
; CHECK-NEXT: vrgather.vv v10, v8, v12
-; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -234,10 +234,10 @@ define <32 x i16> @v16i16_2(<16 x i16> %a, <16 x i16> %b) {
; CHECK-NEXT: addi a0, a0, %lo(.LCPI15_0)
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
-; CHECK-NEXT: vle16.v v20, (a0)
-; CHECK-NEXT: vmv2r.v v16, v10
+; CHECK-NEXT: vle16.v v16, (a0)
+; CHECK-NEXT: vmv2r.v v20, v10
; CHECK-NEXT: vmv2r.v v12, v8
-; CHECK-NEXT: vrgather.vv v8, v12, v20
+; CHECK-NEXT: vrgather.vv v8, v12, v16
; CHECK-NEXT: vid.v v12
; CHECK-NEXT: vrsub.vi v12, v12, 15
; CHECK-NEXT: lui a0, 16
@@ -245,7 +245,7 @@ define <32 x i16> @v16i16_2(<16 x i16> %a, <16 x i16> %b) {
; CHECK-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, mu
-; CHECK-NEXT: vrgather.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vrgather.vv v8, v20, v12, v0.t
; CHECK-NEXT: ret
%v32i16 = shufflevector <16 x i16> %a, <16 x i16> %b, <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <32 x i16> %v32i16
@@ -329,18 +329,18 @@ define <16 x i32> @v8i32_2(<8 x i32> %a, <8 x i32> %b) {
; CHECK-LABEL: v8i32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vid.v v14
-; CHECK-NEXT: vrsub.vi v18, v14, 15
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: vrsub.vi v18, v10, 15
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v8, v12, v18
+; CHECK-NEXT: vrgatherei16.vv v12, v8, v18
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vrsub.vi v12, v14, 7
+; CHECK-NEXT: vrsub.vi v8, v10, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT: vrgatherei16.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%v16i32 = shufflevector <8 x i32> %a, <8 x i32> %b, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <16 x i32> %v16i32
@@ -492,8 +492,8 @@ define <8 x half> @v4f16_2(<4 x half> %a, <4 x half> %b) {
; CHECK-NEXT: vid.v v11
; CHECK-NEXT: vrsub.vi v12, v11, 7
; CHECK-NEXT: vrgather.vv v10, v8, v12
-; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vmv.v.i v0, 15
+; CHECK-NEXT: vrsub.vi v8, v11, 3
; CHECK-NEXT: vrgather.vv v10, v9, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
@@ -638,18 +638,18 @@ define <16 x float> @v8f32_2(<8 x float> %a, <8 x float> %b) {
; CHECK-LABEL: v8f32_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv2r.v v16, v10
-; CHECK-NEXT: vmv2r.v v12, v8
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
-; CHECK-NEXT: vid.v v14
-; CHECK-NEXT: vrsub.vi v18, v14, 15
+; CHECK-NEXT: vid.v v10
+; CHECK-NEXT: vrsub.vi v18, v10, 15
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v8, v12, v18
+; CHECK-NEXT: vrgatherei16.vv v12, v8, v18
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vrsub.vi v12, v14, 7
+; CHECK-NEXT: vrsub.vi v8, v10, 7
; CHECK-NEXT: li a0, 255
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, mu
-; CHECK-NEXT: vrgatherei16.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vrgatherei16.vv v12, v16, v8, v0.t
+; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
%v16f32 = shufflevector <8 x float> %a, <8 x float> %b, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
ret <16 x float> %v16f32
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 8a297db7a3b8..d1c98f828e76 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -4873,8 +4873,8 @@ define void @sink_splat_vp_icmp(ptr nocapture %x, i32 signext %y, <4 x i1> %m, i
; CHECK-NEXT: .LBB102_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vmseq.vx v0, v10, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0), v0.t
@@ -4914,8 +4914,8 @@ define void @sink_splat_vp_fcmp(ptr nocapture %x, float %y, <4 x i1> %m, i32 zer
; CHECK-NEXT: .LBB103_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v10, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmfeq.vf v0, v10, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0), v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
index 5d5a2a3b898b..28583efccdbc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/splat-vector-split-i64-vl-sdnode.ll
@@ -76,7 +76,6 @@ define i32 @splat_vector_split_i64() {
; CHECK-NEXT: vand.vv v8, v8, v12
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vor.vv v8, v10, v8
-; CHECK-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v8, v8, 3
; CHECK-NEXT: vmv.x.s a0, v8
; CHECK-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll b/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll
index b7fe722958bf..9d0234d2ec2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sshl_sat_vec.ll
@@ -33,13 +33,13 @@ define <4 x i32> @vec_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: addi a1, a0, -1
-; CHECK-NEXT: vsll.vv v10, v8, v9
-; CHECK-NEXT: vsra.vv v9, v10, v9
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vsll.vv v11, v8, v9
+; CHECK-NEXT: vsra.vv v9, v11, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
+; CHECK-NEXT: vmerge.vxm v9, v10, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
+; CHECK-NEXT: vmerge.vvm v8, v11, v9, v0
; CHECK-NEXT: ret
%tmp = call <4 x i32> @llvm.sshl.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
ret <4 x i32> %tmp
@@ -52,13 +52,13 @@ define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addi a1, a0, -1
-; CHECK-NEXT: vsll.vv v10, v8, v9
-; CHECK-NEXT: vsra.vv v9, v10, v9
+; CHECK-NEXT: vmv.v.x v10, a1
+; CHECK-NEXT: vsll.vv v11, v8, v9
+; CHECK-NEXT: vsra.vv v9, v11, v9
; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv.v.x v9, a1
-; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
+; CHECK-NEXT: vmerge.vxm v9, v10, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
+; CHECK-NEXT: vmerge.vvm v8, v11, v9, v0
; CHECK-NEXT: ret
%tmp = call <8 x i16> @llvm.sshl.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
ret <8 x i16> %tmp
@@ -70,14 +70,14 @@ define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 127
-; CHECK-NEXT: vsll.vv v10, v8, v9
-; CHECK-NEXT: vsra.vv v9, v10, v9
-; CHECK-NEXT: vmsne.vv v8, v8, v9
-; CHECK-NEXT: vmv.v.x v9, a0
+; CHECK-NEXT: vmv.v.x v10, a0
; CHECK-NEXT: li a0, 128
-; CHECK-NEXT: vmerge.vxm v9, v9, a0, v0
+; CHECK-NEXT: vsll.vv v11, v8, v9
+; CHECK-NEXT: vsra.vv v9, v11, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmerge.vxm v9, v10, a0, v0
; CHECK-NEXT: vmv.v.v v0, v8
-; CHECK-NEXT: vmerge.vvm v8, v10, v9, v0
+; CHECK-NEXT: vmerge.vvm v8, v11, v9, v0
; CHECK-NEXT: ret
%tmp = call <16 x i8> @llvm.sshl.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
ret <16 x i8> %tmp
@@ -115,13 +115,13 @@ define <vscale x 4 x i32> @vec_nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32>
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 524288
; CHECK-NEXT: addi a1, a0, -1
-; CHECK-NEXT: vsll.vv v12, v8, v10
-; CHECK-NEXT: vsra.vv v14, v12, v10
-; CHECK-NEXT: vmsne.vv v10, v8, v14
-; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
+; CHECK-NEXT: vmv.v.x v12, a1
+; CHECK-NEXT: vsll.vv v14, v8, v10
+; CHECK-NEXT: vsra.vv v16, v14, v10
+; CHECK-NEXT: vmsne.vv v10, v8, v16
+; CHECK-NEXT: vmerge.vxm v8, v12, a0, v0
; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: vmerge.vvm v8, v14, v8, v0
; CHECK-NEXT: ret
%tmp = call <vscale x 4 x i32> @llvm.sshl.sat.nxv4i32(<vscale x 4 x i32> %x, <vscale x 4 x i32> %y)
ret <vscale x 4 x i32> %tmp
@@ -134,13 +134,13 @@ define <vscale x 8 x i16> @vec_nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16>
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: lui a0, 8
; CHECK-NEXT: addi a1, a0, -1
-; CHECK-NEXT: vsll.vv v12, v8, v10
-; CHECK-NEXT: vsra.vv v14, v12, v10
-; CHECK-NEXT: vmsne.vv v10, v8, v14
-; CHECK-NEXT: vmv.v.x v8, a1
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
+; CHECK-NEXT: vmv.v.x v12, a1
+; CHECK-NEXT: vsll.vv v14, v8, v10
+; CHECK-NEXT: vsra.vv v16, v14, v10
+; CHECK-NEXT: vmsne.vv v10, v8, v16
+; CHECK-NEXT: vmerge.vxm v8, v12, a0, v0
; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: vmerge.vvm v8, v14, v8, v0
; CHECK-NEXT: ret
%tmp = call <vscale x 8 x i16> @llvm.sshl.sat.nxv8i16(<vscale x 8 x i16> %x, <vscale x 8 x i16> %y)
ret <vscale x 8 x i16> %tmp
@@ -152,14 +152,14 @@ define <vscale x 16 x i8> @vec_nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8>
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmsle.vi v0, v8, -1
; CHECK-NEXT: li a0, 127
-; CHECK-NEXT: vsll.vv v12, v8, v10
-; CHECK-NEXT: vsra.vv v14, v12, v10
-; CHECK-NEXT: vmsne.vv v10, v8, v14
-; CHECK-NEXT: vmv.v.x v8, a0
+; CHECK-NEXT: vmv.v.x v12, a0
; CHECK-NEXT: li a0, 128
-; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
+; CHECK-NEXT: vsll.vv v14, v8, v10
+; CHECK-NEXT: vsra.vv v16, v14, v10
+; CHECK-NEXT: vmsne.vv v10, v8, v16
+; CHECK-NEXT: vmerge.vxm v8, v12, a0, v0
; CHECK-NEXT: vmv1r.v v0, v10
-; CHECK-NEXT: vmerge.vvm v8, v12, v8, v0
+; CHECK-NEXT: vmerge.vvm v8, v14, v8, v0
; CHECK-NEXT: ret
%tmp = call <vscale x 16 x i8> @llvm.sshl.sat.nxv16i8(<vscale x 16 x i8> %x, <vscale x 16 x i8> %y)
ret <vscale x 16 x i8> %tmp
diff --git a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
index eff8c26d4d06..b3150ecea6c0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/stepvector.ll
@@ -562,8 +562,8 @@ define <vscale x 16 x i64> @add_stepvector_nxv16i64() {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vid.v v8
; RV32-NEXT: vadd.vv v8, v8, v8
@@ -597,8 +597,8 @@ define <vscale x 16 x i64> @mul_stepvector_nxv16i64() {
; RV32-NEXT: slli a1, a0, 1
; RV32-NEXT: add a0, a1, a0
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vid.v v8
; RV32-NEXT: li a0, 3
@@ -686,8 +686,8 @@ define <vscale x 16 x i64> @shl_stepvector_nxv16i64() {
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 2
; RV32-NEXT: sw a0, 8(sp)
-; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, ma
; RV32-NEXT: addi a0, sp, 8
+; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
; RV32-NEXT: vid.v v8
; RV32-NEXT: vsll.vi v8, v8, 2
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
index 0e2105d5cba8..4d3bced0bcb5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpload.ll
@@ -588,15 +588,15 @@ define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: mv a3, a4
; CHECK-RV32-NEXT: .LBB49_2:
; CHECK-RV32-NEXT: mul a5, a3, a1
-; CHECK-RV32-NEXT: add a5, a0, a5
; CHECK-RV32-NEXT: srli a4, a4, 3
; CHECK-RV32-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v8, v9, a4
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; CHECK-RV32-NEXT: add a5, a0, a5
; CHECK-RV32-NEXT: vmv1r.v v0, v8
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (a5), a1, v0.t
-; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v9
+; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
@@ -613,15 +613,15 @@ define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: mv a2, a4
; CHECK-RV64-NEXT: .LBB49_2:
; CHECK-RV64-NEXT: mul a5, a2, a1
-; CHECK-RV64-NEXT: add a5, a0, a5
; CHECK-RV64-NEXT: srli a4, a4, 3
; CHECK-RV64-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v8, v9, a4
-; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
+; CHECK-RV64-NEXT: add a5, a0, a5
; CHECK-RV64-NEXT: vmv1r.v v0, v8
+; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v16, (a5), a1, v0.t
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v9
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%v = call <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 %evl)
@@ -697,10 +697,10 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: mv a5, a2
; CHECK-RV32-NEXT: .LBB51_4:
; CHECK-RV32-NEXT: mul t1, a5, a1
-; CHECK-RV32-NEXT: add t1, a0, t1
; CHECK-RV32-NEXT: srli t2, a2, 3
; CHECK-RV32-NEXT: vsetvli t3, zero, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v0, v8, t2
+; CHECK-RV32-NEXT: add t1, a0, t1
; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (t1), a1, v0.t
; CHECK-RV32-NEXT: sub a7, a3, a7
@@ -712,14 +712,14 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV32-NEXT: mv a3, a2
; CHECK-RV32-NEXT: .LBB51_6:
; CHECK-RV32-NEXT: mul a6, a6, a1
-; CHECK-RV32-NEXT: add a6, a0, a6
; CHECK-RV32-NEXT: srli a2, a2, 2
; CHECK-RV32-NEXT: vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-RV32-NEXT: vslidedown.vx v0, v8, a2
+; CHECK-RV32-NEXT: add a6, a0, a6
; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v24, (a6), a1, v0.t
-; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v8
+; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: vs1r.v v24, (a4)
; CHECK-RV32-NEXT: ret
@@ -744,10 +744,10 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: mv a5, a4
; CHECK-RV64-NEXT: .LBB51_4:
; CHECK-RV64-NEXT: mul t1, a5, a1
-; CHECK-RV64-NEXT: add t1, a0, t1
; CHECK-RV64-NEXT: srli t2, a4, 3
; CHECK-RV64-NEXT: vsetvli t3, zero, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v0, v8, t2
+; CHECK-RV64-NEXT: add t1, a0, t1
; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v16, (t1), a1, v0.t
; CHECK-RV64-NEXT: sub a7, a2, a7
@@ -759,14 +759,14 @@ define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vsc
; CHECK-RV64-NEXT: mv a2, a4
; CHECK-RV64-NEXT: .LBB51_6:
; CHECK-RV64-NEXT: mul a6, a6, a1
-; CHECK-RV64-NEXT: add a6, a0, a6
; CHECK-RV64-NEXT: srli a4, a4, 2
; CHECK-RV64-NEXT: vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vslidedown.vx v0, v8, a4
+; CHECK-RV64-NEXT: add a6, a0, a6
; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v24, (a6), a1, v0.t
-; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v8
+; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: vs1r.v v24, (a3)
; CHECK-RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
index 9378bb3d3ca6..e8704b35f31f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-vpstore.ll
@@ -504,10 +504,10 @@ define void @strided_store_nxv16f64(<vscale x 16 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a2, a2, a5
; CHECK-NEXT: mul a4, a4, a1
-; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: srli a3, a3, 3
-; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a3
+; CHECK-NEXT: add a0, a0, a4
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vsse64.v v16, (a0), a1, v0.t
; CHECK-NEXT: ret
@@ -567,36 +567,36 @@ define void @strided_store_nxv17f64(<vscale x 17 x double> %v, ptr %ptr, i32 sig
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
; CHECK-NEXT: vsse64.v v8, (a1), a2, v0.t
; CHECK-NEXT: sub a0, a5, a4
; CHECK-NEXT: sltu t0, a5, a0
; CHECK-NEXT: addi t0, t0, -1
-; CHECK-NEXT: and a0, t0, a0
-; CHECK-NEXT: mul a7, a7, a2
-; CHECK-NEXT: add a7, a1, a7
-; CHECK-NEXT: srli t0, a4, 3
+; CHECK-NEXT: and t0, t0, a0
+; CHECK-NEXT: mul a0, a7, a2
+; CHECK-NEXT: add a7, a1, a0
+; CHECK-NEXT: srli a0, a4, 3
; CHECK-NEXT: vsetvli t1, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v24, t0
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v24, a0
; CHECK-NEXT: sub a0, a3, a6
; CHECK-NEXT: sltu a3, a3, a0
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
+; CHECK-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-NEXT: vsse64.v v16, (a7), a2, v0.t
; CHECK-NEXT: bltu a0, a4, .LBB43_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a0, a4
; CHECK-NEXT: .LBB43_6:
; CHECK-NEXT: mul a3, a5, a2
-; CHECK-NEXT: add a1, a1, a3
; CHECK-NEXT: srli a4, a4, 2
-; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v24, a4
+; CHECK-NEXT: add a1, a1, a3
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsse64.v v8, (a1), a2, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
index dd2c14b037ee..cd9edca1d4c4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vaaddu-sdnode.ll
@@ -37,9 +37,9 @@ define <vscale x 8 x i8> @vaaddu_vx_nxv8i8_floor(<vscale x 8 x i8> %x, i8 %y) {
define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_floor_sexti16(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
; CHECK-LABEL: vaaddu_vv_nxv8i8_floor_sexti16:
; CHECK: # %bb.0:
+; CHECK-NEXT: csrwi vxrm, 2
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT: vwadd.vv v10, v8, v9
-; CHECK-NEXT: vnsrl.wi v8, v10, 1
+; CHECK-NEXT: vaadd.vv v8, v8, v9
; CHECK-NEXT: ret
%xzv = sext <vscale x 8 x i8> %x to <vscale x 8 x i16>
%yzv = sext <vscale x 8 x i8> %y to <vscale x 8 x i16>
@@ -226,12 +226,9 @@ define <vscale x 8 x i8> @vaaddu_vx_nxv8i8_ceil(<vscale x 8 x i8> %x, i8 %y) {
define <vscale x 8 x i8> @vaaddu_vv_nxv8i8_ceil_sexti16(<vscale x 8 x i8> %x, <vscale x 8 x i8> %y) {
; CHECK-LABEL: vaaddu_vv_nxv8i8_ceil_sexti16:
; CHECK: # %bb.0:
+; CHECK-NEXT: csrwi vxrm, 0
; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
-; CHECK-NEXT: vwadd.vv v10, v8, v9
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vadd.vi v10, v10, 1
-; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, ma
-; CHECK-NEXT: vnsrl.wi v8, v10, 1
+; CHECK-NEXT: vaadd.vv v8, v8, v9
; CHECK-NEXT: ret
%xzv = sext <vscale x 8 x i8> %x to <vscale x 8 x i16>
%yzv = sext <vscale x 8 x i8> %y to <vscale x 8 x i16>
diff --git a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
index 4b5e737d22eb..ede395f4df8e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vadd-vp.ll
@@ -580,8 +580,8 @@ define <vscale x 128 x i8> @vadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.add.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
@@ -1359,8 +1359,8 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.add.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
@@ -1415,8 +1415,8 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB120_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%evl = call i32 @llvm.vscale.i32()
@@ -1451,8 +1451,8 @@ define <vscale x 32 x i32> @vadd_vi_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, <
; RV64-NEXT: slli a0, a0, 1
; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
%evl = call i32 @llvm.vscale.i32()
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
index 939a45e15c10..6e34d59a2d98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcopysign-vp.ll
@@ -255,9 +255,9 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -272,8 +272,8 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsgnj.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -312,9 +312,9 @@ define <vscale x 32 x half> @vfsgnj_vv_nxv32f16_unmasked(<vscale x 32 x half> %v
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
index 6f06d8e570de..e59a9174b03d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vcpop.ll
@@ -44,8 +44,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -98,8 +98,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -138,8 +138,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -178,8 +178,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -218,8 +218,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 16
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -258,8 +258,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 32
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -298,8 +298,8 @@ define iXLen @intrinsic_vcpop_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 64
; CHECK-LABEL: intrinsic_vcpop_mask_m_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
index d02fe5b205f7..0ef7572890ef 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -12,16 +12,16 @@ define {<16 x i1>, <16 x i1>} @vector_deinterleave_v16i1_v32i1(<32 x i1> %vec) {
; CHECK-NEXT: vmerge.vim v10, v8, 1, v0
; CHECK-NEXT: vid.v v9
; CHECK-NEXT: vadd.vv v11, v9, v9
-; CHECK-NEXT: vrgather.vv v9, v10, v11
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v0, 2
; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT: vrgather.vv v9, v10, v11
; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
-; CHECK-NEXT: vadd.vi v12, v11, -16
; CHECK-NEXT: li a0, -256
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv.s.x v0, a0
; CHECK-NEXT: vsetvli zero, zero, e8, m1, ta, mu
+; CHECK-NEXT: vadd.vi v12, v11, -16
; CHECK-NEXT: vrgather.vv v9, v8, v12, v0.t
; CHECK-NEXT: vmsne.vi v9, v9, 0
; CHECK-NEXT: vadd.vi v12, v11, 1
@@ -80,9 +80,8 @@ define {<2 x i64>, <2 x i64>} @vector_deinterleave_v2i64_v4i64(<4 x i64> %vec) {
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 2
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 2
; CHECK-NEXT: vrgather.vi v9, v8, 1
; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
; CHECK-NEXT: vslideup.vi v8, v10, 1
@@ -167,9 +166,8 @@ define {<2 x double>, <2 x double>} @vector_deinterleave_v2f64_v4f64(<4 x double
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v8, 2
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vmv.v.i v0, 2
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vmv.v.i v0, 2
; CHECK-NEXT: vrgather.vi v9, v8, 1
; CHECK-NEXT: vmerge.vvm v9, v9, v10, v0
; CHECK-NEXT: vslideup.vi v8, v10, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index 8f4ff37fffb0..f0f847c61f3b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -110,23 +110,22 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_load_nxv8i6
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vl8re64.v v8, (a1)
-; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: vl8re64.v v8, (a0)
+; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a2, 24
-; CHECK-NEXT: mul a1, a1, a2
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT: vl8re64.v v0, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
-; CHECK-NEXT: vid.v v8
-; CHECK-NEXT: vadd.vv v16, v8, v8
-; CHECK-NEXT: vrgather.vv v8, v0, v16
+; CHECK-NEXT: mul a0, a0, a2
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vl8re64.v v0, (a1)
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e64, m8, ta, ma
+; CHECK-NEXT: vid.v v8
+; CHECK-NEXT: vadd.vv v16, v8, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
@@ -134,34 +133,47 @@ define {<vscale x 8 x i64>, <vscale x 8 x i64>} @vector_deinterleave_load_nxv8i6
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vrgather.vv v24, v8, v16
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vrgather.vv v8, v0, v16
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vadd.vi v8, v16, 1
-; CHECK-NEXT: vrgather.vv v16, v0, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a1, 24
; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vrgather.vv v16, v0, v8
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vrgather.vv v24, v0, v8
-; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vmv4r.v v28, v8
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmv4r.v v28, v8
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v20, v8
; CHECK-NEXT: vmv8r.v v8, v24
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
index 7797577362c9..bcb008857ad3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave.ll
@@ -8,14 +8,15 @@ define {<vscale x 16 x i1>, <vscale x 16 x i1>} @vector_deinterleave_nxv16i1_nxv
; CHECK-LABEL: vector_deinterleave_nxv16i1_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v12, v8, 1, v0
+; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a0
+; CHECK-NEXT: vslidedown.vx v8, v0, a0
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
-; CHECK-NEXT: vmerge.vim v14, v8, 1, v0
+; CHECK-NEXT: vmerge.vim v12, v10, 1, v0
+; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vmerge.vim v14, v10, 1, v0
; CHECK-NEXT: vnsrl.wi v10, v12, 0
; CHECK-NEXT: vmsne.vi v8, v10, 0
; CHECK-NEXT: vnsrl.wi v10, v12, 8
@@ -90,25 +91,38 @@ declare {<vscale x 2 x i64>, <vscale x 2 x i64>} @llvm.vector.deinterleave2.nxv4
define {<vscale x 64 x i1>, <vscale x 64 x i1>} @vector_deinterleave_nxv64i1_nxv128i1(<vscale x 128 x i1> %vec) {
; CHECK-LABEL: vector_deinterleave_nxv64i1_nxv128i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v28, v8
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x02, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 2 * vlenb
+; CHECK-NEXT: vmv1r.v v12, v8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmv.v.i v8, 0
-; CHECK-NEXT: vmerge.vim v16, v8, 1, v0
+; CHECK-NEXT: vmv.v.i v24, 0
+; CHECK-NEXT: vmerge.vim v16, v24, 1, v0
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vnsrl.wi v24, v16, 0
+; CHECK-NEXT: vnsrl.wi v8, v16, 0
+; CHECK-NEXT: vmv1r.v v0, v12
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v28
-; CHECK-NEXT: vmerge.vim v8, v8, 1, v0
+; CHECK-NEXT: vmerge.vim v24, v24, 1, v0
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vnsrl.wi v28, v8, 0
+; CHECK-NEXT: vnsrl.wi v12, v24, 0
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v7, v24, 0
+; CHECK-NEXT: vmsne.vi v7, v8, 0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vs1r.v v7, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
-; CHECK-NEXT: vnsrl.wi v24, v16, 8
-; CHECK-NEXT: vnsrl.wi v28, v8, 8
+; CHECK-NEXT: vnsrl.wi v0, v16, 8
+; CHECK-NEXT: vnsrl.wi v4, v24, 8
; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
-; CHECK-NEXT: vmsne.vi v9, v24, 0
-; CHECK-NEXT: vmv1r.v v8, v7
+; CHECK-NEXT: vmsne.vi v9, v0, 0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 1
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%retval = call {<vscale x 64 x i1>, <vscale x 64 x i1>} @llvm.vector.deinterleave2.nxv128i1(<vscale x 128 x i1> %vec)
ret {<vscale x 64 x i1>, <vscale x 64 x i1>} %retval
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index 7ade47e60bc6..5ebf63f0a441 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -107,14 +107,14 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
; CHECK-NEXT: srli a2, a1, 1
; CHECK-NEXT: vsetvli a3, zero, e16, m2, ta, mu
; CHECK-NEXT: vid.v v24
-; CHECK-NEXT: vsrl.vi v26, v24, 1
-; CHECK-NEXT: vand.vi v24, v24, 1
-; CHECK-NEXT: vmsne.vi v28, v24, 0
+; CHECK-NEXT: vand.vi v26, v24, 1
+; CHECK-NEXT: vmsne.vi v28, v26, 0
+; CHECK-NEXT: vsrl.vi v24, v24, 1
; CHECK-NEXT: vmv1r.v v0, v28
-; CHECK-NEXT: vadd.vx v26, v26, a2, v0.t
+; CHECK-NEXT: vadd.vx v24, v24, a2, v0.t
; CHECK-NEXT: vmv4r.v v12, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v0, v8, v26
+; CHECK-NEXT: vrgatherei16.vv v0, v8, v24
; CHECK-NEXT: csrr a2, vlenb
; CHECK-NEXT: slli a2, a2, 3
; CHECK-NEXT: add a2, sp, a2
@@ -123,7 +123,7 @@ define void @vector_interleave_store_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vs
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vrgatherei16.vv v8, v16, v26
+; CHECK-NEXT: vrgatherei16.vv v8, v16, v24
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vs8r.v v8, (a1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index a7e0ad6ee5f4..2e9f62e2f552 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -122,9 +122,9 @@ define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64>
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vid.v v12
+; CHECK-NEXT: vand.vi v13, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v13, 0
; CHECK-NEXT: vsrl.vi v16, v12, 1
-; CHECK-NEXT: vand.vi v12, v12, 1
-; CHECK-NEXT: vmsne.vi v0, v12, 0
; CHECK-NEXT: vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
@@ -137,9 +137,9 @@ define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64>
; ZVBB-NEXT: srli a0, a0, 2
; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT: vid.v v12
+; ZVBB-NEXT: vand.vi v13, v12, 1
+; ZVBB-NEXT: vmsne.vi v0, v13, 0
; ZVBB-NEXT: vsrl.vi v16, v12, 1
-; ZVBB-NEXT: vand.vi v12, v12, 1
-; ZVBB-NEXT: vmsne.vi v0, v12, 0
; ZVBB-NEXT: vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16
@@ -269,13 +269,13 @@ define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i
;
; ZVBB-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: li a0, 32
; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; ZVBB-NEXT: vwsll.vx v24, v16, a0
-; ZVBB-NEXT: vwaddu.wv v24, v24, v8
+; ZVBB-NEXT: vwsll.vx v8, v16, a0
+; ZVBB-NEXT: vwaddu.wv v8, v8, v24
; ZVBB-NEXT: vwsll.vx v0, v20, a0
-; ZVBB-NEXT: vwaddu.wv v0, v0, v12
-; ZVBB-NEXT: vmv8r.v v8, v24
+; ZVBB-NEXT: vwaddu.wv v0, v0, v28
; ZVBB-NEXT: vmv8r.v v16, v0
; ZVBB-NEXT: ret
%res = call <vscale x 32 x i32> @llvm.vector.interleave2.nxv32i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b)
@@ -288,44 +288,32 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv8r.v v0, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vid.v v24
-; CHECK-NEXT: vsrl.vi v6, v24, 1
-; CHECK-NEXT: vand.vi v8, v24, 1
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vx v6, v6, a0, v0.t
+; CHECK-NEXT: vand.vi v26, v24, 1
+; CHECK-NEXT: vmsne.vi v10, v26, 0
+; CHECK-NEXT: vsrl.vi v8, v24, 1
+; CHECK-NEXT: vmv8r.v v24, v0
+; CHECK-NEXT: vmv4r.v v12, v4
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: vmv4r.v v28, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v8, v24, v6
+; CHECK-NEXT: vrgatherei16.vv v0, v24, v8
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vrgatherei16.vv v24, v16, v6
-; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vrgatherei16.vv v24, v16, v8
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmv.v.v v16, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -335,44 +323,32 @@ define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 4
-; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 3
-; ZVBB-NEXT: add a0, sp, a0
-; ZVBB-NEXT: addi a0, a0, 16
-; ZVBB-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVBB-NEXT: vmv8r.v v0, v8
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: srli a0, a0, 1
; ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT: vid.v v24
-; ZVBB-NEXT: vsrl.vi v6, v24, 1
-; ZVBB-NEXT: vand.vi v8, v24, 1
-; ZVBB-NEXT: vmsne.vi v0, v8, 0
-; ZVBB-NEXT: csrr a1, vlenb
-; ZVBB-NEXT: slli a1, a1, 3
-; ZVBB-NEXT: add a1, sp, a1
-; ZVBB-NEXT: addi a1, a1, 16
-; ZVBB-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; ZVBB-NEXT: vadd.vx v6, v6, a0, v0.t
+; ZVBB-NEXT: vand.vi v26, v24, 1
+; ZVBB-NEXT: vmsne.vi v10, v26, 0
+; ZVBB-NEXT: vsrl.vi v8, v24, 1
+; ZVBB-NEXT: vmv8r.v v24, v0
+; ZVBB-NEXT: vmv4r.v v12, v4
+; ZVBB-NEXT: vmv1r.v v0, v10
+; ZVBB-NEXT: vadd.vx v8, v8, a0, v0.t
; ZVBB-NEXT: vmv4r.v v28, v16
; ZVBB-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; ZVBB-NEXT: vrgatherei16.vv v8, v24, v6
+; ZVBB-NEXT: vrgatherei16.vv v0, v24, v8
; ZVBB-NEXT: addi a0, sp, 16
-; ZVBB-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 3
-; ZVBB-NEXT: add a0, sp, a0
-; ZVBB-NEXT: addi a0, a0, 16
-; ZVBB-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVBB-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVBB-NEXT: vmv4r.v v16, v12
-; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6
-; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: vrgatherei16.vv v24, v16, v8
; ZVBB-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVBB-NEXT: vmv.v.v v16, v24
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 4
+; ZVBB-NEXT: slli a0, a0, 3
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
@@ -516,9 +492,9 @@ define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x do
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT: vid.v v12
+; CHECK-NEXT: vand.vi v13, v12, 1
+; CHECK-NEXT: vmsne.vi v0, v13, 0
; CHECK-NEXT: vsrl.vi v16, v12, 1
-; CHECK-NEXT: vand.vi v12, v12, 1
-; CHECK-NEXT: vmsne.vi v0, v12, 0
; CHECK-NEXT: vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT: vrgatherei16.vv v12, v8, v16
@@ -531,9 +507,9 @@ define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x do
; ZVBB-NEXT: srli a0, a0, 2
; ZVBB-NEXT: vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT: vid.v v12
+; ZVBB-NEXT: vand.vi v13, v12, 1
+; ZVBB-NEXT: vmsne.vi v0, v13, 0
; ZVBB-NEXT: vsrl.vi v16, v12, 1
-; ZVBB-NEXT: vand.vi v12, v12, 1
-; ZVBB-NEXT: vmsne.vi v0, v12, 0
; ZVBB-NEXT: vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT: vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT: vrgatherei16.vv v12, v8, v16
@@ -593,13 +569,13 @@ define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x
;
; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
; ZVBB: # %bb.0:
+; ZVBB-NEXT: vmv8r.v v24, v8
; ZVBB-NEXT: li a0, 32
; ZVBB-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; ZVBB-NEXT: vwsll.vx v24, v16, a0
-; ZVBB-NEXT: vwaddu.wv v24, v24, v8
+; ZVBB-NEXT: vwsll.vx v8, v16, a0
+; ZVBB-NEXT: vwaddu.wv v8, v8, v24
; ZVBB-NEXT: vwsll.vx v0, v20, a0
-; ZVBB-NEXT: vwaddu.wv v0, v0, v12
-; ZVBB-NEXT: vmv8r.v v8, v24
+; ZVBB-NEXT: vwaddu.wv v0, v0, v28
; ZVBB-NEXT: vmv8r.v v16, v0
; ZVBB-NEXT: ret
%res = call <vscale x 32 x float> @llvm.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
@@ -612,44 +588,32 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: sub sp, sp, a0
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: sub sp, sp, a0
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv8r.v v0, v8
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT: vid.v v24
-; CHECK-NEXT: vsrl.vi v6, v24, 1
-; CHECK-NEXT: vand.vi v8, v24, 1
-; CHECK-NEXT: vmsne.vi v0, v8, 0
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
-; CHECK-NEXT: add a1, sp, a1
-; CHECK-NEXT: addi a1, a1, 16
-; CHECK-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT: vadd.vx v6, v6, a0, v0.t
+; CHECK-NEXT: vand.vi v26, v24, 1
+; CHECK-NEXT: vmsne.vi v10, v26, 0
+; CHECK-NEXT: vsrl.vi v8, v24, 1
+; CHECK-NEXT: vmv8r.v v24, v0
+; CHECK-NEXT: vmv4r.v v12, v4
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: vmv4r.v v28, v16
; CHECK-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v8, v24, v6
+; CHECK-NEXT: vrgatherei16.vv v0, v24, v8
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vrgatherei16.vv v24, v16, v6
-; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vrgatherei16.vv v24, v16, v8
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vmv.v.v v16, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -659,44 +623,32 @@ define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x
; ZVBB-NEXT: addi sp, sp, -16
; ZVBB-NEXT: .cfi_def_cfa_offset 16
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 4
-; ZVBB-NEXT: sub sp, sp, a0
-; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: slli a0, a0, 3
-; ZVBB-NEXT: add a0, sp, a0
-; ZVBB-NEXT: addi a0, a0, 16
-; ZVBB-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVBB-NEXT: sub sp, sp, a0
+; ZVBB-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVBB-NEXT: vmv8r.v v0, v8
; ZVBB-NEXT: csrr a0, vlenb
; ZVBB-NEXT: srli a0, a0, 1
; ZVBB-NEXT: vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT: vid.v v24
-; ZVBB-NEXT: vsrl.vi v6, v24, 1
-; ZVBB-NEXT: vand.vi v8, v24, 1
-; ZVBB-NEXT: vmsne.vi v0, v8, 0
-; ZVBB-NEXT: csrr a1, vlenb
-; ZVBB-NEXT: slli a1, a1, 3
-; ZVBB-NEXT: add a1, sp, a1
-; ZVBB-NEXT: addi a1, a1, 16
-; ZVBB-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
-; ZVBB-NEXT: vadd.vx v6, v6, a0, v0.t
+; ZVBB-NEXT: vand.vi v26, v24, 1
+; ZVBB-NEXT: vmsne.vi v10, v26, 0
+; ZVBB-NEXT: vsrl.vi v8, v24, 1
+; ZVBB-NEXT: vmv8r.v v24, v0
+; ZVBB-NEXT: vmv4r.v v12, v4
+; ZVBB-NEXT: vmv1r.v v0, v10
+; ZVBB-NEXT: vadd.vx v8, v8, a0, v0.t
; ZVBB-NEXT: vmv4r.v v28, v16
; ZVBB-NEXT: vsetvli zero, zero, e64, m8, ta, ma
-; ZVBB-NEXT: vrgatherei16.vv v8, v24, v6
+; ZVBB-NEXT: vrgatherei16.vv v0, v24, v8
; ZVBB-NEXT: addi a0, sp, 16
-; ZVBB-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 3
-; ZVBB-NEXT: add a0, sp, a0
-; ZVBB-NEXT: addi a0, a0, 16
-; ZVBB-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVBB-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVBB-NEXT: vmv4r.v v16, v12
-; ZVBB-NEXT: vrgatherei16.vv v24, v16, v6
-; ZVBB-NEXT: addi a0, sp, 16
+; ZVBB-NEXT: vrgatherei16.vv v24, v16, v8
; ZVBB-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVBB-NEXT: vmv.v.v v16, v24
; ZVBB-NEXT: csrr a0, vlenb
-; ZVBB-NEXT: slli a0, a0, 4
+; ZVBB-NEXT: slli a0, a0, 3
; ZVBB-NEXT: add sp, sp, a0
; ZVBB-NEXT: addi sp, sp, 16
; ZVBB-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
index be56db52e349..8cb6fed2f588 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-splice.ll
@@ -209,9 +209,7 @@ define <vscale x 16 x i1> @splice_nxv16i1_offset_negone(<vscale x 16 x i1> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v12, 1
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -259,9 +257,7 @@ define <vscale x 32 x i1> @splice_nxv32i1_offset_negone(<vscale x 32 x i1> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -282,8 +278,8 @@ define <vscale x 32 x i1> @splice_nxv32i1_offset_max(<vscale x 32 x i1> %a, <vsc
; CHECK-NEXT: li a1, 63
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vx v16, v16, a1
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT: vmerge.vim v8, v12, 1, v0
; CHECK-NEXT: vslideup.vx v16, v8, a0
; CHECK-NEXT: vand.vi v8, v16, 1
@@ -308,9 +304,7 @@ define <vscale x 64 x i1> @splice_nxv64i1_offset_negone(<vscale x 64 x i1> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT: vslideup.vi v8, v16, 1
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
@@ -331,8 +325,8 @@ define <vscale x 64 x i1> @splice_nxv64i1_offset_max(<vscale x 64 x i1> %a, <vsc
; CHECK-NEXT: li a1, 127
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v24, v24, a1
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT: vmerge.vim v8, v16, 1, v0
; CHECK-NEXT: vslideup.vx v24, v8, a0
; CHECK-NEXT: vand.vi v8, v24, 1
@@ -358,9 +352,8 @@ define <vscale x 1 x i8> @splice_nxv1i8_offset_negone(<vscale x 1 x i8> %a, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i8> @llvm.vector.splice.nxv1i8(<vscale x 1 x i8> %a, <vscale x 1 x i8> %b, i32 -1)
@@ -413,9 +406,8 @@ define <vscale x 2 x i8> @splice_nxv2i8_offset_negone(<vscale x 2 x i8> %a, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x i8> @llvm.vector.splice.nxv2i8(<vscale x 2 x i8> %a, <vscale x 2 x i8> %b, i32 -1)
@@ -468,9 +460,8 @@ define <vscale x 4 x i8> @splice_nxv4i8_offset_negone(<vscale x 4 x i8> %a, <vsc
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, mf2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x i8> @llvm.vector.splice.nxv4i8(<vscale x 4 x i8> %a, <vscale x 4 x i8> %b, i32 -1)
@@ -522,9 +513,8 @@ define <vscale x 8 x i8> @splice_nxv8i8_offset_negone(<vscale x 8 x i8> %a, <vsc
; CHECK: # %bb.0:
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e8, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 8 x i8> @llvm.vector.splice.nxv8i8(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, i32 -1)
@@ -745,9 +735,8 @@ define <vscale x 1 x i16> @splice_nxv1i16_offset_negone(<vscale x 1 x i16> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i16> @llvm.vector.splice.nxv1i16(<vscale x 1 x i16> %a, <vscale x 1 x i16> %b, i32 -1)
@@ -800,9 +789,8 @@ define <vscale x 2 x i16> @splice_nxv2i16_offset_negone(<vscale x 2 x i16> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x i16> @llvm.vector.splice.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i16> %b, i32 -1)
@@ -855,9 +843,8 @@ define <vscale x 4 x i16> @splice_nxv4i16_offset_negone(<vscale x 4 x i16> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x i16> @llvm.vector.splice.nxv4i16(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, i32 -1)
@@ -1075,9 +1062,8 @@ define <vscale x 1 x i32> @splice_nxv1i32_offset_negone(<vscale x 1 x i32> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i32> @llvm.vector.splice.nxv1i32(<vscale x 1 x i32> %a, <vscale x 1 x i32> %b, i32 -1)
@@ -1130,9 +1116,8 @@ define <vscale x 2 x i32> @splice_nxv2i32_offset_negone(<vscale x 2 x i32> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x i32> @llvm.vector.splice.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, i32 -1)
@@ -1348,9 +1333,8 @@ define <vscale x 1 x i64> @splice_nxv1i64_offset_negone(<vscale x 1 x i64> %a, <
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x i64> @llvm.vector.splice.nxv1i64(<vscale x 1 x i64> %a, <vscale x 1 x i64> %b, i32 -1)
@@ -1565,9 +1549,8 @@ define <vscale x 1 x half> @splice_nxv1f16_offset_negone(<vscale x 1 x half> %a,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf4, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x half> @llvm.vector.splice.nxv1f16(<vscale x 1 x half> %a, <vscale x 1 x half> %b, i32 -1)
@@ -1620,9 +1603,8 @@ define <vscale x 2 x half> @splice_nxv2f16_offset_negone(<vscale x 2 x half> %a,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, mf2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x half> @llvm.vector.splice.nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b, i32 -1)
@@ -1675,9 +1657,8 @@ define <vscale x 4 x half> @splice_nxv4f16_offset_negone(<vscale x 4 x half> %a,
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 1
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e16, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 4 x half> @llvm.vector.splice.nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b, i32 -1)
@@ -1895,9 +1876,8 @@ define <vscale x 1 x float> @splice_nxv1f32_offset_negone(<vscale x 1 x float> %
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x float> @llvm.vector.splice.nxv1f32(<vscale x 1 x float> %a, <vscale x 1 x float> %b, i32 -1)
@@ -1950,9 +1930,8 @@ define <vscale x 2 x float> @splice_nxv2f32_offset_negone(<vscale x 2 x float> %
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e32, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 2 x float> @llvm.vector.splice.nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b, i32 -1)
@@ -2168,9 +2147,8 @@ define <vscale x 1 x double> @splice_nxv1f64_offset_negone(<vscale x 1 x double>
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: srli a0, a0, 3
; CHECK-NEXT: addi a0, a0, -1
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vslidedown.vx v8, v8, a0
-; CHECK-NEXT: vsetvli a0, zero, e64, m1, ta, ma
; CHECK-NEXT: vslideup.vi v8, v9, 1
; CHECK-NEXT: ret
%res = call <vscale x 1 x double> @llvm.vector.splice.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, i32 -1)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
index df2bc523cd7a..0f7e3f1e0ea5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfabs-vp.ll
@@ -250,8 +250,8 @@ define <vscale x 32 x half> @vfabs_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfabs.v v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -541,8 +541,8 @@ define <vscale x 16 x double> @vfabs_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfabs.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.fabs.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
index c69a7bc5cece..ad7fb63fec2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfadd-vp.ll
@@ -579,9 +579,9 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -596,8 +596,8 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -636,9 +636,9 @@ define <vscale x 32 x half> @vfadd_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -675,12 +675,20 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfadd_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 2
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -692,7 +700,9 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl4r.v v12, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -702,11 +712,15 @@ define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB24_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfadd.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
index 3ad17e85570a..81d844d1950a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfdiv-vp.ll
@@ -525,9 +525,9 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -542,8 +542,8 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -582,9 +582,9 @@ define <vscale x 32 x half> @vfdiv_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -621,12 +621,20 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfdiv_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 2
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -638,7 +646,9 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl4r.v v12, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -648,11 +658,15 @@ define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfdiv.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
index 3be3f835f3d1..eafd605c6110 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfirst.ll
@@ -44,8 +44,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv1i1(<vscale x 1 x i1> %0, <vscale x 1 x
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -98,8 +98,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv2i1(<vscale x 2 x i1> %0, <vscale x 2 x
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -138,8 +138,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv4i1(<vscale x 4 x i1> %0, <vscale x 4 x
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -178,8 +178,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv8i1(<vscale x 8 x i1> %0, <vscale x 8 x
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -218,8 +218,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv16i1(<vscale x 16 x i1> %0, <vscale x 1
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -258,8 +258,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv32i1(<vscale x 32 x i1> %0, <vscale x 3
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
@@ -298,8 +298,8 @@ define iXLen @intrinsic_vfirst_mask_m_nxv64i1(<vscale x 64 x i1> %0, <vscale x 6
; CHECK-LABEL: intrinsic_vfirst_mask_m_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vfirst.m a0, v9, v0.t
; CHECK-NEXT: ret
entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index 0f47236d6600..8201f18175e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -1134,12 +1134,10 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: sub a6, a4, a1
; CHECK-NEXT: sltu a7, a4, a6
; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a6, a7, a6
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
-; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: vl8re64.v v8, (a5)
; CHECK-NEXT: vl8re64.v v16, (a2)
; CHECK-NEXT: csrr a2, vlenb
@@ -1150,19 +1148,21 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a3
-; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a2, 24
-; CHECK-NEXT: mul a0, a0, a2
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: and a0, a7, a6
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a2, 24
@@ -1174,7 +1174,6 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB92_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
@@ -1188,6 +1187,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
@@ -1234,24 +1234,23 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: vl8re64.v v16, (a3)
; CHECK-NEXT: sub a3, a4, a1
; CHECK-NEXT: sltu a5, a4, a3
-; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: vl8re64.v v8, (a2)
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re64.v v0, (a0)
+; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: and a3, a5, a3
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v8, v24
; CHECK-NEXT: bltu a4, a1, .LBB93_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB93_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
@@ -1259,6 +1258,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v0, v24, v8
; CHECK-NEXT: vmv.v.v v8, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
index d4ba0f8c9073..c15b875e8f0c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-constrained-sdnode.ll
@@ -239,15 +239,15 @@ define <vscale x 16 x half> @vfmadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscal
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24
+; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 2
; ZVFHMIN-NEXT: add sp, sp, a0
@@ -315,87 +315,88 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 40
-; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv8r.v v0, v8
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 24
-; ZVFHMIN-NEXT: mul a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
+; ZVFHMIN-NEXT: vmv8r.v v24, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: vmv8r.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16
+; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
-; ZVFHMIN-NEXT: vmv8r.v v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 40
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
@@ -415,77 +416,68 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
-; ZVFHMIN-NEXT: vmv8r.v v0, v16
-; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: sub sp, sp, a0
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v0
+; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v4
+; ZVFHMIN-NEXT: vmv.v.v v8, v4
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v28
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v28
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
index 8a72b2ddafac..4ce556efb44d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd-sdnode.ll
@@ -323,87 +323,88 @@ define <vscale x 32 x half> @vfmadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 40
-; ZVFHMIN-NEXT: mul a1, a1, a2
+; ZVFHMIN-NEXT: slli a1, a1, 5
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv8r.v v0, v8
-; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 24
-; ZVFHMIN-NEXT: mul a1, a1, a2
-; ZVFHMIN-NEXT: add a1, sp, a1
-; ZVFHMIN-NEXT: addi a1, a1, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
+; ZVFHMIN-NEXT: vmv8r.v v24, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: vmv8r.v v8, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 5
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v16
+; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
-; ZVFHMIN-NEXT: vmv8r.v v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 40
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 5
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
@@ -423,10 +424,14 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
@@ -435,33 +440,50 @@ define <vscale x 32 x half> @vfmadd_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmadd.vv v0, v8, v24
-; ZVFHMIN-NEXT: vmv8r.v v24, v8
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
-; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv4r.v v12, v20
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v8, v24, v0
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: li a1, 24
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
index 7556b3ace5c6..7ab999ea4fa7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmax-vp.ll
@@ -255,9 +255,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -272,8 +272,8 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmax.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -312,9 +312,9 @@ define <vscale x 32 x half> @vfmax_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
index 755c66537612..e928df85b5bb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmin-vp.ll
@@ -255,9 +255,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -272,8 +272,8 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmin.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -312,9 +312,9 @@ define <vscale x 32 x half> @vfmin_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
index 5114f0a8d1d6..c835dc72268b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub-constrained-sdnode.ll
@@ -347,75 +347,64 @@ define <vscale x 32 x half> @vfmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 28
+; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 28 * vlenb
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; ZVFHMIN-NEXT: vmv8r.v v24, v16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: slli a1, a1, 3
+; ZVFHMIN-NEXT: slli a1, a1, 4
; ZVFHMIN-NEXT: add a1, sp, a1
; ZVFHMIN-NEXT: addi a1, a1, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
+; ZVFHMIN-NEXT: vl8re16.v v16, (a0)
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 20
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vmv4r.v v20, v12
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v0, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v0, v0
+; ZVFHMIN-NEXT: vfmacc.vv v0, v8, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 20
-; ZVFHMIN-NEXT: mul a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmacc.vv v8, v0, v16
+; ZVFHMIN-NEXT: vfneg.v v8, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v8, v0, v24
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 28
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
@@ -437,55 +426,93 @@ define <vscale x 32 x half> @vfmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: li a1, 28
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
-; ZVFHMIN-NEXT: vmv8r.v v24, v16
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 28 * vlenb
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv8r.v v24, v8
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v0, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v0, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 20
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 20
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v0, v0
+; ZVFHMIN-NEXT: vfmacc.vv v0, v24, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmacc.vv v24, v0, v8
+; ZVFHMIN-NEXT: vfneg.v v8, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 20
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 20
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v8, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 4
+; ZVFHMIN-NEXT: li a1, 28
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
index 30d5919238cf..61f3e63f246c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmul-vp.ll
@@ -525,9 +525,9 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -542,8 +542,8 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -582,9 +582,9 @@ define <vscale x 32 x half> @vfmul_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -621,12 +621,20 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfmul_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 2
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -638,7 +646,9 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl4r.v v12, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -648,11 +658,15 @@ define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfmul.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
index bacf9bae83ed..abda6750e5a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmuladd-vp.ll
@@ -1134,12 +1134,10 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: add a6, sp, a6
; CHECK-NEXT: addi a6, a6, 16
; CHECK-NEXT: vs8r.v v8, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: sub a6, a4, a1
; CHECK-NEXT: sltu a7, a4, a6
; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a6, a7, a6
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
-; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: vl8re64.v v8, (a5)
; CHECK-NEXT: vl8re64.v v16, (a2)
; CHECK-NEXT: csrr a2, vlenb
@@ -1150,19 +1148,21 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: vl8re64.v v16, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a3
-; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: li a2, 24
-; CHECK-NEXT: mul a0, a0, a2
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add a0, sp, a0
-; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: and a0, a7, a6
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a2, a2, a3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 4
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v8, v24, v16, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: li a2, 24
@@ -1174,7 +1174,6 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB92_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 5
@@ -1188,6 +1187,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64(<vscale x 16 x double> %va, <vsc
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v24, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
@@ -1234,24 +1234,23 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: vl8re64.v v16, (a3)
; CHECK-NEXT: sub a3, a4, a1
; CHECK-NEXT: sltu a5, a4, a3
-; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: vl8re64.v v8, (a2)
; CHECK-NEXT: addi a2, sp, 16
; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: vl8re64.v v0, (a0)
+; CHECK-NEXT: addi a5, a5, -1
; CHECK-NEXT: and a3, a5, a3
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v16, v8, v24
; CHECK-NEXT: bltu a4, a1, .LBB93_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a4, a1
; CHECK-NEXT: .LBB93_2:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: add a0, sp, a0
@@ -1259,6 +1258,7 @@ define <vscale x 16 x double> @vfma_vv_nxv16f64_unmasked(<vscale x 16 x double>
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vfmadd.vv v0, v24, v8
; CHECK-NEXT: vmv.v.v v8, v0
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
index 1db5fa1720a2..69ea7ce33cf6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfneg-vp.ll
@@ -250,8 +250,8 @@ define <vscale x 32 x half> @vfneg_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -541,8 +541,8 @@ define <vscale x 16 x double> @vfneg_vv_nxv16f64(<vscale x 16 x double> %va, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfneg.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.fneg.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
index 785f60ad1d39..b54590cd9d84 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd-constrained-sdnode.ll
@@ -412,85 +412,85 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
+; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 12
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
+; ZVFHMIN-NEXT: vfneg.v v0, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfneg.v v0, v0
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 20
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
+; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
+; ZVFHMIN-NEXT: vfneg.v v8, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
+; ZVFHMIN-NEXT: vfneg.v v8, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl4r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 12
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v0
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 20
-; ZVFHMIN-NEXT: mul a0, a0, a1
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v20
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v0, v8, v24
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v0
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
@@ -515,16 +515,16 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: li a1, 20
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x0c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 12 * vlenb
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x14, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 20 * vlenb
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vmv8r.v v16, v8
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
@@ -534,48 +534,61 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v0, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v20
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v0
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfneg.v v0, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: add a0, sp, a0
+; ZVFHMIN-NEXT: addi a0, a0, 16
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs4r.v v12, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v0, v0
+; ZVFHMIN-NEXT: vfmadd.vv v0, v16, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfneg.v v16, v16
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
-; ZVFHMIN-NEXT: vmv.v.v v12, v8
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl4r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
+; ZVFHMIN-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 12
+; ZVFHMIN-NEXT: li a1, 20
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
index 1a2da051c962..2f7e693a8a6f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub-constrained-sdnode.ll
@@ -376,85 +376,76 @@ define <vscale x 32 x half> @vfnmsub_vv_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: addi sp, sp, -16
; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
; ZVFHMIN-NEXT: csrr a1, vlenb
-; ZVFHMIN-NEXT: li a2, 28
+; ZVFHMIN-NEXT: li a2, 24
; ZVFHMIN-NEXT: mul a1, a1, a2
; ZVFHMIN-NEXT: sub sp, sp, a1
-; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x1c, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 28 * vlenb
-; ZVFHMIN-NEXT: vl8re16.v v0, (a0)
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; ZVFHMIN-NEXT: vl8re16.v v24, (a0)
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs4r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v0
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v24, v24
+; ZVFHMIN-NEXT: vfneg.v v0, v0
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v0, v24
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v24, v0
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
+; ZVFHMIN-NEXT: vmv4r.v v20, v12
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
-; ZVFHMIN-NEXT: add a0, sp, a0
-; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v8, v16, v0
+; ZVFHMIN-NEXT: vfneg.v v8, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
-; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v28
+; ZVFHMIN-NEXT: vs4r.v v16, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 24
-; ZVFHMIN-NEXT: mul a0, a0, a1
+; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl4r.v v20, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v20
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v28
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: slli a0, a0, 3
+; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v8, v0, v24
+; ZVFHMIN-NEXT: vl4r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: addi a0, sp, 16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v24, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v20, v8
-; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
-; ZVFHMIN-NEXT: li a1, 28
+; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: add sp, sp, a0
; ZVFHMIN-NEXT: addi sp, sp, 16
@@ -480,7 +471,6 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: mul a0, a0, a1
; ZVFHMIN-NEXT: sub sp, sp, a0
; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
-; ZVFHMIN-NEXT: vmv8r.v v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
@@ -488,58 +478,59 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16(<vscale x 32 x half> %va, <vsca
; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmv.v.f v8, fa5
+; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfneg.v v0, v8
+; ZVFHMIN-NEXT: vfneg.v v24, v24
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v28, v0
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v16
+; ZVFHMIN-NEXT: vs8r.v v24, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vmv8r.v v8, v16
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v28
-; ZVFHMIN-NEXT: vmv.v.v v16, v28
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vs8r.v v16, (a0) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: vmv.v.v v8, v4
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: addi a0, sp, 16
-; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v0, v8, v24
-; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v0
+; ZVFHMIN-NEXT: vl8r.v v0, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v24, v0, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 4
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v28
+; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v20
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
; ZVFHMIN-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v16
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v0, v12
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: slli a0, a0, 3
; ZVFHMIN-NEXT: add a0, sp, a0
; ZVFHMIN-NEXT: addi a0, a0, 16
-; ZVFHMIN-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
-; ZVFHMIN-NEXT: vfmadd.vv v0, v24, v16
+; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
+; ZVFHMIN-NEXT: vfmadd.vv v16, v0, v8
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v0
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: li a1, 24
; ZVFHMIN-NEXT: mul a0, a0, a1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
index aaaf4ad46071..28dc3e765dc3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfpext-vp.ll
@@ -112,8 +112,8 @@ define <vscale x 32 x float> @vfpext_nxv32f16_nxv32f32(<vscale x 32 x half> %a,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB7_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfwcvt.f.f.v v24, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index 15c4bf255e6d..f3544589407d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -394,7 +394,15 @@ declare <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -404,16 +412,22 @@ define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfncvt.rtz.x.f.w v28, v16, v0.t
+; CHECK-NEXT: vfncvt.rtz.x.f.w v20, v24, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB25_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfncvt.rtz.x.f.w v24, v8, v0.t
-; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: vfncvt.rtz.x.f.w v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i16> %v
@@ -440,8 +454,8 @@ define <vscale x 32 x i32> @vfptosi_nxv32i32_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.x.f.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.fptosi.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index a2591e7dc35f..9fd2d8edb220 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -394,7 +394,15 @@ declare <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x flo
define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmv1r.v v24, v0
+; CHECK-NEXT: addi sp, sp, -16
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: sub sp, sp, a1
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; CHECK-NEXT: vmv1r.v v7, v0
+; CHECK-NEXT: addi a1, sp, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -404,16 +412,22 @@ define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT: vfncvt.rtz.xu.f.w v28, v16, v0.t
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v20, v24, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB25_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB25_2:
+; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vfncvt.rtz.xu.f.w v24, v8, v0.t
-; CHECK-NEXT: vmv8r.v v8, v24
+; CHECK-NEXT: vfncvt.rtz.xu.f.w v16, v8, v0.t
+; CHECK-NEXT: vmv8r.v v8, v16
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i16> %v
@@ -440,8 +454,8 @@ define <vscale x 32 x i32> @vfptoui_nxv32i32_nxv32f32(<vscale x 32 x float> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.rtz.xu.f.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.fptoui.nxv32i32.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index 0c3abe37af27..d2219cf96359 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -113,16 +113,16 @@ define <vscale x 16 x float> @vfptrunc_nxv16f32_nxv16f64(<vscale x 16 x double>
; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a2, a3, a2
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
-; CHECK-NEXT: addi a2, sp, 16
-; CHECK-NEXT: vl8r.v v24, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vfncvt.f.f.w v20, v24, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB7_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB7_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v16, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: csrr a0, vlenb
@@ -169,11 +169,11 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: sub a6, a5, a1
; CHECK-NEXT: sltu a7, a5, a6
; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a6, a7, a6
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v16, a3
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
+; CHECK-NEXT: and a0, a7, a6
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v20, v8, v0.t
; CHECK-NEXT: bltu a5, a1, .LBB8_2
; CHECK-NEXT: # %bb.1:
@@ -181,8 +181,8 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: .LBB8_2:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v7, a3
-; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v16, v24, v0.t
; CHECK-NEXT: bltu a2, a4, .LBB8_4
; CHECK-NEXT: # %bb.3:
@@ -192,22 +192,22 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
; CHECK-NEXT: sltu a3, a2, a0
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v28, v8, v0.t
; CHECK-NEXT: bltu a2, a1, .LBB8_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB8_6:
-; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vfncvt.f.f.w v24, v8, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
index d6caad15e40a..bd229e0220a4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsqrt-vp.ll
@@ -250,8 +250,8 @@ define <vscale x 32 x half> @vfsqrt_vv_nxv32f16(<vscale x 32 x half> %va, <vscal
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB10_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v16
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsqrt.v v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -541,8 +541,8 @@ define <vscale x 16 x double> @vfsqrt_vv_nxv16f64(<vscale x 16 x double> %va, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsqrt.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.sqrt.nxv16f64(<vscale x 16 x double> %va, <vscale x 16 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
index 2eae18d7cc49..c833f8048fe3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfsub-vp.ll
@@ -525,9 +525,9 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: srli a2, a2, 2
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -542,8 +542,8 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale
; ZVFHMIN-NEXT: vl8r.v v24, (a1) # Unknown-size Folded Reload
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v24
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v24, v16, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
@@ -582,9 +582,9 @@ define <vscale x 32 x half> @vfsub_vv_nxv32f16_unmasked(<vscale x 32 x half> %va
; ZVFHMIN-NEXT: vmset.m v24
; ZVFHMIN-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v24, a2
-; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: addi a2, sp, 16
; ZVFHMIN-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v20
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
@@ -621,12 +621,20 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
;
; ZVFHMIN-LABEL: vfsub_vf_nxv32f16:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv1r.v v3, v0
+; ZVFHMIN-NEXT: addi sp, sp, -16
+; ZVFHMIN-NEXT: .cfi_def_cfa_offset 16
+; ZVFHMIN-NEXT: csrr a1, vlenb
+; ZVFHMIN-NEXT: slli a1, a1, 2
+; ZVFHMIN-NEXT: sub sp, sp, a1
+; ZVFHMIN-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x04, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 4 * vlenb
+; ZVFHMIN-NEXT: vmv1r.v v7, v0
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
+; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
+; ZVFHMIN-NEXT: addi a1, sp, 16
+; ZVFHMIN-NEXT: vs4r.v v16, (a1) # Unknown-size Folded Spill
; ZVFHMIN-NEXT: csrr a2, vlenb
; ZVFHMIN-NEXT: slli a1, a2, 1
; ZVFHMIN-NEXT: sub a3, a0, a1
@@ -638,7 +646,9 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a2
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v12
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4
+; ZVFHMIN-NEXT: addi a2, sp, 16
+; ZVFHMIN-NEXT: vl4r.v v12, (a2) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
; ZVFHMIN-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a2, zero, e16, m4, ta, ma
@@ -648,11 +658,15 @@ define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB22_2:
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8
+; ZVFHMIN-NEXT: vmv1r.v v0, v7
; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vmv1r.v v0, v3
; ZVFHMIN-NEXT: vfsub.vv v16, v16, v24, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
+; ZVFHMIN-NEXT: csrr a0, vlenb
+; ZVFHMIN-NEXT: slli a0, a0, 2
+; ZVFHMIN-NEXT: add sp, sp, a0
+; ZVFHMIN-NEXT: addi sp, sp, 16
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 32 x half> %elt.head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
index 78f3792dbaf0..f9d992a40299 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc-vp.ll
@@ -671,9 +671,9 @@ define <vscale x 16 x float> @vfmacc_vv_nxv16f32(<vscale x 16 x half> %a, <vscal
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfmadd.vv v24, v16, v8, v0.t
; ZVFHMIN-NEXT: vmv.v.v v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
index ca0bbfd65ca2..c11867d55ba0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc-vp.ll
@@ -625,9 +625,9 @@ define <vscale x 16 x float> @vfnmacc_vv_nxv16f32(<vscale x 16 x half> %a, <vsca
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfnmadd.vv v24, v16, v8, v0.t
; ZVFHMIN-NEXT: vmv.v.v v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
@@ -710,17 +710,17 @@ define <vscale x 16 x float> @vfnmacc_vf_nxv16f32_commute(<vscale x 16 x half> %
;
; ZVFHMIN-LABEL: vfnmacc_vf_nxv16f32_commute:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv4r.v v24, v8
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmv.v.f v8, fa5
+; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24, v0.t
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfnmadd.vv v8, v24, v16, v0.t
+; ZVFHMIN-NEXT: vfnmadd.vv v24, v8, v16, v0.t
+; ZVFHMIN-NEXT: vmv.v.v v8, v24
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
index 2797ca2eb316..0ad7be47bcc8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac-vp.ll
@@ -601,9 +601,9 @@ define <vscale x 16 x float> @vfnmsac_vv_nxv16f32(<vscale x 16 x half> %a, <vsca
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v16, v8, v0.t
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12, v0.t
-; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: addi a0, sp, 16
; ZVFHMIN-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfnmsub.vv v24, v16, v8, v0.t
; ZVFHMIN-NEXT: vmv.v.v v8, v24
; ZVFHMIN-NEXT: csrr a0, vlenb
@@ -682,17 +682,17 @@ define <vscale x 16 x float> @vfnmsac_vf_nxv16f32_commute(<vscale x 16 x half> %
;
; ZVFHMIN-LABEL: vfnmsac_vf_nxv16f32_commute:
; ZVFHMIN: # %bb.0:
-; ZVFHMIN-NEXT: vmv4r.v v24, v8
; ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; ZVFHMIN-NEXT: vsetvli a1, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfmv.v.f v8, fa5
+; ZVFHMIN-NEXT: vfmv.v.f v24, fa5
; ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v8
+; ZVFHMIN-NEXT: vfncvt.f.f.w v4, v24
; ZVFHMIN-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v24, v0.t
-; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v4, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v8, v0.t
+; ZVFHMIN-NEXT: vfwcvt.f.f.v v8, v4, v0.t
; ZVFHMIN-NEXT: vsetvli zero, zero, e32, m8, ta, ma
-; ZVFHMIN-NEXT: vfnmsub.vv v8, v24, v16, v0.t
+; ZVFHMIN-NEXT: vfnmsub.vv v24, v8, v16, v0.t
+; ZVFHMIN-NEXT: vmv.v.v v8, v24
; ZVFHMIN-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
%vb = shufflevector <vscale x 16 x half> %elt.head, <vscale x 16 x half> poison, <vscale x 16 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
index 5d0172430d15..77ef0a340270 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vitofp-sdnode.ll
@@ -449,12 +449,12 @@ define <vscale x 32 x half> @vsitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
; ZVFHMIN-NEXT: vmv.v.i v12, 0
; ZVFHMIN-NEXT: vmerge.vim v8, v12, -1, v0
; ZVFHMIN-NEXT: vfwcvt.f.x.v v16, v8
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: srli a0, a0, 2
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a0
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: vmerge.vim v12, v12, -1, v0
; ZVFHMIN-NEXT: vfwcvt.f.x.v v16, v12
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
@@ -478,12 +478,12 @@ define <vscale x 32 x half> @vuitofp_nxv32i1_nxv32f16(<vscale x 32 x i1> %va) {
; ZVFHMIN-NEXT: vmv.v.i v12, 0
; ZVFHMIN-NEXT: vmerge.vim v8, v12, 1, v0
; ZVFHMIN-NEXT: vfwcvt.f.xu.v v16, v8
-; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: csrr a0, vlenb
; ZVFHMIN-NEXT: srli a0, a0, 2
; ZVFHMIN-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; ZVFHMIN-NEXT: vslidedown.vx v0, v0, a0
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT: vfncvt.f.f.w v8, v16
; ZVFHMIN-NEXT: vmerge.vim v12, v12, 1, v0
; ZVFHMIN-NEXT: vfwcvt.f.xu.v v16, v12
; ZVFHMIN-NEXT: vfncvt.f.f.w v12, v16
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
index a35fc874065a..8a7646798662 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ -423,8 +423,8 @@ define <vscale x 128 x i8> @vmax_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
@@ -986,8 +986,8 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1046,8 +1046,8 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1084,8 +1084,8 @@ define <vscale x 32 x i32> @vmax_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmax.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
index 1f620a44dbbc..1c74887c1b20 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmaxu-vp.ll
@@ -425,8 +425,8 @@ define <vscale x 128 x i8> @vmaxu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
@@ -988,8 +988,8 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1048,8 +1048,8 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1086,8 +1086,8 @@ define <vscale x 32 x i32> @vmaxu_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va,
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmaxu.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
index e7184921d87a..2e5b67c93fce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfeq.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v10
-; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfeq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfeq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfeq.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v12
-; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfeq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfeq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfeq.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v10
-; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfeq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfeq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfeq.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v12
-; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfeq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfeq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfeq.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfeq.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v9
+; CHECK-NEXT: vmfeq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfeq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfeq.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v10
-; CHECK-NEXT: vmfeq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfeq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfeq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfeq.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfeq.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfeq_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfeq.vv v0, v8, v12
-; CHECK-NEXT: vmfeq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfeq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfeq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfeq.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmfeq_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmfeq_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmfeq_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmfeq.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmfeq_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmfeq.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmfeq_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfeq_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmfeq.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
index a6dad9eaa4f3..b5ca47707c8a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfge.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v10, v8
-; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmfle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfge.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v12, v8
-; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmfle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfge.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v10, v8
-; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmfle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfge.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v12, v8
-; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmfle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfge.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfge.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v9, v8
+; CHECK-NEXT: vmfle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfge.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v10, v8
-; CHECK-NEXT: vmfle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmfle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfge.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfge.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v12, v8
-; CHECK-NEXT: vmfle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmfle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfge.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmfge_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmfge_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmfge_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmfge.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmfge_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmfge.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmfge_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfge_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmfge.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
index f643a4036381..971249d38d1b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v10, v8
-; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmflt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfgt.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v12, v8
-; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmflt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfgt.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v10, v8
-; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmflt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfgt.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v12, v8
-; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmflt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfgt.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfgt.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v9, v8
+; CHECK-NEXT: vmflt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfgt.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v10, v8
-; CHECK-NEXT: vmflt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmflt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfgt.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfgt.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v12, v8
-; CHECK-NEXT: vmflt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmflt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfgt.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmfgt_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmfgt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmfgt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmfgt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmfgt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmfgt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmfgt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfgt_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmfgt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
index 6c52364c1fbd..f19a181a365a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfle.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v10
-; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfle.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v12
-; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfle.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v10
-; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfle.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v12
-; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfle.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfle.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v9
+; CHECK-NEXT: vmfle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfle.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v10
-; CHECK-NEXT: vmfle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfle.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfle.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfle_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfle.vv v0, v8, v12
-; CHECK-NEXT: vmfle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfle.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmfle_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmfle_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmfle_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmfle.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmfle_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmfle.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmfle_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfle_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmfle.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
index 37a9c6b081a1..0a0464221933 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmflt.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v10
-; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmflt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmflt.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v12
-; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmflt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmflt.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v10
-; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmflt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmflt.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v12
-; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmflt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmflt.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmflt.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v9
+; CHECK-NEXT: vmflt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmflt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmflt.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v10
-; CHECK-NEXT: vmflt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmflt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmflt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmflt.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmflt.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmflt_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmflt.vv v0, v8, v12
-; CHECK-NEXT: vmflt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmflt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmflt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmflt.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmflt_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmflt_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmflt_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmflt.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmflt_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmflt.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmflt_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmflt_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmflt.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
index 5defce42091e..520099247e0f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmfne.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f16(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f16_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f16(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16(<vscale x 2 x i1> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f16_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f16(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16(<vscale x 4 x i1> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -187,11 +190,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f16(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16(<vscale x 8 x i1> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f16_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v10
-; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f16(
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmfne.mask.nxv16f16(
define <vscale x 16 x i1> @intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16(<vscale x 16 x i1> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x half> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv16f16_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v12
-; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmfne.nxv16f16(
@@ -289,9 +294,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f32(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32(<vscale x 1 x i1> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f32_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -340,9 +346,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f32(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32(<vscale x 2 x i1> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -391,11 +398,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f32(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32(<vscale x 4 x i1> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f32_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v10
-; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f32(
@@ -442,11 +450,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmfne.mask.nxv8f32(
define <vscale x 8 x i1> @intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32(<vscale x 8 x i1> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x float> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv8f32_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v12
-; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmfne.nxv8f32(
@@ -493,9 +502,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmfne.mask.nxv1f64(
define <vscale x 1 x i1> @intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64(<vscale x 1 x i1> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v9
+; CHECK-NEXT: vmfne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmfne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -544,11 +554,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmfne.mask.nxv2f64(
define <vscale x 2 x i1> @intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64(<vscale x 2 x i1> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv2f64_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v10
-; CHECK-NEXT: vmfne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmfne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmfne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmfne.nxv2f64(
@@ -595,11 +606,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmfne.mask.nxv4f64(
define <vscale x 4 x i1> @intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64(<vscale x 4 x i1> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x double> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmfne_mask_vv_nxv4f64_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmfne.vv v0, v8, v12
-; CHECK-NEXT: vmfne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmfne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmfne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmfne.nxv4f64(
@@ -647,8 +659,8 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f16_f16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -694,8 +706,8 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f16_f16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -741,8 +753,8 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f16_f16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -788,8 +800,8 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f16_f16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -835,8 +847,8 @@ define <vscale x 16 x i1> @intrinsic_vmfne_mask_vf_nxv16f16_f16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv16f16_f16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -882,8 +894,8 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f32_f32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -929,8 +941,8 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f32_f32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -976,8 +988,8 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f32_f32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1023,8 +1035,8 @@ define <vscale x 8 x i1> @intrinsic_vmfne_mask_vf_nxv8f32_f32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv8f32_f32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1070,8 +1082,8 @@ define <vscale x 1 x i1> @intrinsic_vmfne_mask_vf_nxv1f64_f64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv1f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmfne.vf v10, v8, fa0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1117,8 +1129,8 @@ define <vscale x 2 x i1> @intrinsic_vmfne_mask_vf_nxv2f64_f64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv2f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmfne.vf v11, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1164,8 +1176,8 @@ define <vscale x 4 x i1> @intrinsic_vmfne_mask_vf_nxv4f64_f64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmfne_mask_vf_nxv4f64_f64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmfne.vf v13, v8, fa0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 8fabf93356ae..1c71242c3c7d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -423,8 +423,8 @@ define <vscale x 128 x i8> @vmin_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
@@ -986,8 +986,8 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1046,8 +1046,8 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1084,8 +1084,8 @@ define <vscale x 32 x i32> @vmin_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va, i
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmin.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
index 8ec85e545a0f..6d89a9777cf9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vminu-vp.ll
@@ -425,8 +425,8 @@ define <vscale x 128 x i8> @vminu_vx_nxv128i8(<vscale x 128 x i8> %va, i8 %b, <v
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB34_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 128 x i8> poison, i8 %b, i32 0
@@ -988,8 +988,8 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32(<vscale x 32 x i32> %va, i32 %b, <
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1048,8 +1048,8 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx8(<vscale x 32 x i32> %va, i
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
@@ -1086,8 +1086,8 @@ define <vscale x 32 x i32> @vminu_vx_nxv32i32_evl_nx16(<vscale x 32 x i32> %va,
; RV64-NEXT: slli a1, a1, 1
; RV64-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vminu.vx v16, v16, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 32 x i32> poison, i32 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
index 2d6e958fcd0b..14a1f084c398 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsbf.ll
@@ -32,8 +32,8 @@ define <vscale x 1 x i1> @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -74,8 +74,8 @@ define <vscale x 2 x i1> @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -116,8 +116,8 @@ define <vscale x 4 x i1> @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -158,8 +158,8 @@ define <vscale x 8 x i1> @intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -200,8 +200,8 @@ define <vscale x 16 x i1> @intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -242,8 +242,8 @@ define <vscale x 32 x i1> @intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -284,8 +284,8 @@ define <vscale x 64 x i1> @intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmsbf.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
index cc6c1f585bb7..9f181f7a30eb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmseq.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmseq.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmseq.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmseq.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmseq.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmseq.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmseq.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmseq.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v9
+; CHECK-NEXT: vmseq.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmseq.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmseq.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v10
-; CHECK-NEXT: vmseq.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmseq.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmseq.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmseq.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmseq.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmseq_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmseq.vv v0, v8, v12
-; CHECK-NEXT: vmseq.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmseq.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmseq.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmseq.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmseq.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmseq.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmseq.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmseq.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmseq_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmseq.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmseq_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmseq_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmseq_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmseq_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmseq.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmseq_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmseq.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmseq_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmseq_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmseq.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
index c8f9b60a3f2d..75fc407abbc2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsge.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v9, v8
+; CHECK-NEXT: vmsle.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v10, v8
-; CHECK-NEXT: vmsle.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsle.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v12, v8
-; CHECK-NEXT: vmsle.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsle.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
@@ -954,8 +972,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1002,8 +1020,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1050,8 +1068,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1098,8 +1116,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1146,8 +1164,8 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1194,8 +1212,8 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1242,8 +1260,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1290,8 +1308,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1338,8 +1356,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1386,8 +1404,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1434,8 +1452,8 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1482,8 +1500,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1530,8 +1548,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1578,8 +1596,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1626,8 +1644,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1701,8 +1719,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v10, v9
; RV64-NEXT: ret
@@ -1776,8 +1794,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v11, v10
; RV64-NEXT: ret
@@ -1851,8 +1869,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v13, v12
; RV64-NEXT: ret
@@ -1886,8 +1904,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1921,8 +1939,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1956,8 +1974,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1991,8 +2009,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2026,8 +2044,8 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2061,8 +2079,8 @@ define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2096,8 +2114,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2131,8 +2149,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, -1, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2166,8 +2184,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2201,8 +2219,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2236,8 +2254,8 @@ define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2271,8 +2289,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2306,8 +2324,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2341,8 +2359,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2376,8 +2394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2411,8 +2429,8 @@ define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2446,8 +2464,8 @@ define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2481,8 +2499,8 @@ define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
index b6c6d9e90f61..5568c1e9b1cf 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v9, v8
+; CHECK-NEXT: vmsleu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v10, v8
-; CHECK-NEXT: vmsleu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v12, v8
-; CHECK-NEXT: vmsleu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64(
@@ -954,8 +972,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1002,8 +1020,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1050,8 +1068,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1098,8 +1116,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1146,8 +1164,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1194,8 +1212,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1242,8 +1260,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1290,8 +1308,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1338,8 +1356,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1386,8 +1404,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1434,8 +1452,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vx_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1482,8 +1500,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1530,8 +1548,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v10, v9
; CHECK-NEXT: ret
@@ -1578,8 +1596,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v11, v10
; CHECK-NEXT: ret
@@ -1626,8 +1644,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmxor.mm v0, v13, v12
; CHECK-NEXT: ret
@@ -1701,8 +1719,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v10, v9
; RV64-NEXT: ret
@@ -1776,8 +1794,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v11, v10
; RV64-NEXT: ret
@@ -1851,8 +1869,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsltu.vx v13, v8, a0, v0.t
; RV64-NEXT: vmxor.mm v0, v13, v12
; RV64-NEXT: ret
@@ -1886,8 +1904,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1921,8 +1939,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1956,8 +1974,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1991,8 +2009,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2026,8 +2044,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2061,8 +2079,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2096,8 +2114,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2178,8 +2196,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2213,8 +2231,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2248,8 +2266,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2283,8 +2301,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2318,8 +2336,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2353,8 +2371,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2388,8 +2406,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2423,8 +2441,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 14, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2458,8 +2476,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, -16, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2493,8 +2511,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, -14, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
index dfd7096a65eb..f1fa6484d976 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgt.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgt.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgt.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgt.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgt.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgt.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgt.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v9, v8
+; CHECK-NEXT: vmslt.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v10, v8
-; CHECK-NEXT: vmslt.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmslt.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgt.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v12, v8
-; CHECK-NEXT: vmslt.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmslt.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsgt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsgt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsgt.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsgt.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsgt_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsgt.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgt_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsgt.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsgt.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsgt.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
index 8826be03bbeb..de7a0ad87be2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsgtu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsgtu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsgtu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsgtu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsgtu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsgtu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v9, v8
+; CHECK-NEXT: vmsltu.vv v8, v9, v8
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v10, v9, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v10, v8
-; CHECK-NEXT: vmsltu.vv v14, v12, v10, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v10, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v12, v10, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsgtu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v12, v8
-; CHECK-NEXT: vmsltu.vv v20, v16, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v12, v8
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v16, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsgtu.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vx_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsgtu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsgtu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsgtu.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsgtu.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsgtu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsgtu.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsgtu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsgtu_mask_vi_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsgtu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsgtu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsgtu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsgtu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsgtu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsgtu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
index 8ce9a3020b7a..05d402afc934 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsif.ll
@@ -32,8 +32,8 @@ define <vscale x 1 x i1> @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -74,8 +74,8 @@ define <vscale x 2 x i1> @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -116,8 +116,8 @@ define <vscale x 4 x i1> @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -158,8 +158,8 @@ define <vscale x 8 x i1> @intrinsic_vmsif_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -200,8 +200,8 @@ define <vscale x 16 x i1> @intrinsic_vmsif_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -242,8 +242,8 @@ define <vscale x 32 x i1> @intrinsic_vmsif_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -284,8 +284,8 @@ define <vscale x 64 x i1> @intrinsic_vmsif_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmsif.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
index 5d5a28edbfe1..f54aef3ed405 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsle.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsle.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsle.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsle.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsle.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsle.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsle.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsle.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v9
+; CHECK-NEXT: vmsle.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsle.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsle.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v10
-; CHECK-NEXT: vmsle.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsle.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsle.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsle.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsle.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsle_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsle.vv v0, v8, v12
-; CHECK-NEXT: vmsle.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsle.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsle.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsle.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsle.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsle.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsle.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsle.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsle_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsle.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsle_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsle_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsle_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsle_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsle_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsle_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsle_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
index c58ac2d07183..540577247484 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsleu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsleu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsleu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsleu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsleu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsleu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsleu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsleu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v9
+; CHECK-NEXT: vmsleu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsleu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsleu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v10
-; CHECK-NEXT: vmsleu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsleu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsleu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsleu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsleu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsleu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsleu.vv v0, v8, v12
-; CHECK-NEXT: vmsleu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsleu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsleu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsleu.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vx_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsleu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsleu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsleu.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsleu.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsleu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsleu.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsleu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsleu_mask_vi_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsleu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsleu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsleu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsleu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsleu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
index 6c6e580b043d..554d25172d4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmslt.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmslt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmslt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmslt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmslt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmslt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmslt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v9
+; CHECK-NEXT: vmslt.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmslt.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v10
-; CHECK-NEXT: vmslt.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmslt.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmslt.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmslt.vv v0, v8, v12
-; CHECK-NEXT: vmslt.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmslt.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmslt.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmslt.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmslt.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmslt.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmslt.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmslt.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, -9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmslt.vx v10, v8, zero, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsle.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsle.vi v11, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsle.vi v13, v8, 8, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
index 76f3e449ab58..7a8efa6c80fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsltu.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsltu.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsltu.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsltu.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsltu.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsltu.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsltu.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v9
+; CHECK-NEXT: vmsltu.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsltu.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsltu.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v10
-; CHECK-NEXT: vmsltu.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsltu.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsltu.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsltu.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsltu.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsltu_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsltu.vv v0, v8, v12
-; CHECK-NEXT: vmsltu.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsltu.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsltu.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vx_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vx_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsltu.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsltu.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsltu.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsltu.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsltu_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsltu.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, -15, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, -13, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, -11, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, -9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i8_i8(<vscale x 16 x i1
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, -7, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsltu_mask_vi_nxv32i8_i8(<vscale x 32 x i1
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, -5, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, -3, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsltu.vx v10, v8, zero, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 2, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsltu_mask_vi_nxv16i16_i16(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 4, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 6, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 8, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, 10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsltu_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, 12, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsltu_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsleu.vi v10, v8, 14, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsltu_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsleu.vi v11, v8, -16, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsltu_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsleu.vi v13, v8, -14, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
index 161c1bc4314f..bd6bd8a804bc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsne.ll
@@ -34,9 +34,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i8(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i8_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -85,9 +86,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i8(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i8_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -136,9 +138,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i8(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i8_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -187,9 +190,10 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i8(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -238,11 +242,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i8(
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i8_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i8(
@@ -289,11 +294,12 @@ declare <vscale x 32 x i1> @llvm.riscv.vmsne.mask.nxv32i8(
define <vscale x 32 x i1> @intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv32i8_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 32 x i1> @llvm.riscv.vmsne.nxv32i8(
@@ -340,9 +346,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i16(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i16_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -391,9 +398,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i16(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i16_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -442,9 +450,10 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i16(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -493,11 +502,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i16(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i16_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i16(
@@ -544,11 +554,12 @@ declare <vscale x 16 x i1> @llvm.riscv.vmsne.mask.nxv16i16(
define <vscale x 16 x i1> @intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 16 x i1> @llvm.riscv.vmsne.nxv16i16(
@@ -595,9 +606,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i32(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i32_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -646,9 +658,10 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i32(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -697,11 +710,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i32(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i32_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i32(
@@ -748,11 +762,12 @@ declare <vscale x 8 x i1> @llvm.riscv.vmsne.mask.nxv8i32(
define <vscale x 8 x i1> @intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv8i32_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 8 x i1> @llvm.riscv.vmsne.nxv8i32(
@@ -799,9 +814,10 @@ declare <vscale x 1 x i1> @llvm.riscv.vmsne.mask.nxv1i64(
define <vscale x 1 x i1> @intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v11, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v9
+; CHECK-NEXT: vmsne.vv v8, v8, v9
+; CHECK-NEXT: vmv1r.v v11, v0
+; CHECK-NEXT: vmv.v.v v0, v8
; CHECK-NEXT: vmsne.vv v11, v9, v10, v0.t
; CHECK-NEXT: vmv.v.v v0, v11
; CHECK-NEXT: ret
@@ -850,11 +866,12 @@ declare <vscale x 2 x i1> @llvm.riscv.vmsne.mask.nxv2i64(
define <vscale x 2 x i1> @intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv2i64_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v14, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v10
-; CHECK-NEXT: vmsne.vv v14, v10, v12, v0.t
+; CHECK-NEXT: vmsne.vv v14, v8, v10
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v14
+; CHECK-NEXT: vmsne.vv v8, v10, v12, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 2 x i1> @llvm.riscv.vmsne.nxv2i64(
@@ -901,11 +918,12 @@ declare <vscale x 4 x i1> @llvm.riscv.vmsne.mask.nxv4i64(
define <vscale x 4 x i1> @intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmsne_mask_vv_nxv4i64_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vmv1r.v v20, v0
; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
-; CHECK-NEXT: vmsne.vv v0, v8, v12
-; CHECK-NEXT: vmsne.vv v20, v12, v16, v0.t
+; CHECK-NEXT: vmsne.vv v20, v8, v12
+; CHECK-NEXT: vmv1r.v v8, v0
; CHECK-NEXT: vmv1r.v v0, v20
+; CHECK-NEXT: vmsne.vv v8, v12, v16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
entry:
%mask = call <vscale x 4 x i1> @llvm.riscv.vmsne.nxv4i64(
@@ -953,8 +971,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1000,8 +1018,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1047,8 +1065,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1094,8 +1112,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1141,8 +1159,8 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1188,8 +1206,8 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vx_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1235,8 +1253,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1282,8 +1300,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1329,8 +1347,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1376,8 +1394,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1423,8 +1441,8 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vx_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1470,8 +1488,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1517,8 +1535,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -1564,8 +1582,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT: vmsne.vx v11, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -1611,8 +1629,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vx_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vx_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT: vmsne.vx v13, v8, a0, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -1685,8 +1703,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vx_nxv1i64_i64(<vscale x 1 x i1>
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv1i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v10, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmv1r.v v0, v9
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT: vmsne.vx v10, v8, a0, v0.t
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: ret
@@ -1759,8 +1777,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vx_nxv2i64_i64(<vscale x 2 x i1>
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv2i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v11, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmv1r.v v0, v10
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT: vmsne.vx v11, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v11
; RV64-NEXT: ret
@@ -1833,8 +1851,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vx_nxv4i64_i64(<vscale x 4 x i1>
; RV64-LABEL: intrinsic_vmsne_mask_vx_nxv4i64_i64:
; RV64: # %bb.0: # %entry
; RV64-NEXT: vmv1r.v v13, v0
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT: vmsne.vx v13, v8, a0, v0.t
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: ret
@@ -1868,8 +1886,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1903,8 +1921,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1938,8 +1956,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -1973,8 +1991,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2008,8 +2026,8 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i8_i8(<vscale x 16 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2043,8 +2061,8 @@ define <vscale x 32 x i1> @intrinsic_vmsne_mask_vi_nxv32i8_i8(<vscale x 32 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv32i8_i8:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2078,8 +2096,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i16_i16(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2113,8 +2131,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i16_i16(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2148,8 +2166,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i16_i16(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2183,8 +2201,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i16_i16(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2218,8 +2236,8 @@ define <vscale x 16 x i1> @intrinsic_vmsne_mask_vi_nxv16i16_i16(<vscale x 16 x i
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv16i16_i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2253,8 +2271,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i32_i32(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -2288,8 +2306,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i32_i32(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2323,8 +2341,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i32_i32(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2358,8 +2376,8 @@ define <vscale x 8 x i1> @intrinsic_vmsne_mask_vi_nxv8i32_i32(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv8i32_i32:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
@@ -2393,8 +2411,8 @@ define <vscale x 1 x i1> @intrinsic_vmsne_mask_vi_nxv1i64_i64(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv1i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vmsne.vi v10, v8, 9, v0.t
; CHECK-NEXT: vmv.v.v v0, v10
; CHECK-NEXT: ret
@@ -2428,8 +2446,8 @@ define <vscale x 2 x i1> @intrinsic_vmsne_mask_vi_nxv2i64_i64(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv2i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v11, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT: vmsne.vi v11, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v11
; CHECK-NEXT: ret
@@ -2463,8 +2481,8 @@ define <vscale x 4 x i1> @intrinsic_vmsne_mask_vi_nxv4i64_i64(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsne_mask_vi_nxv4i64_i64:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v13, v0
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT: vmsne.vi v13, v8, 9, v0.t
; CHECK-NEXT: vmv1r.v v0, v13
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
index f6f90eddcd8c..0c60681ea8de 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsof.ll
@@ -32,8 +32,8 @@ define <vscale x 1 x i1> @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1(<vscale x 1 x i1>
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -74,8 +74,8 @@ define <vscale x 2 x i1> @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1(<vscale x 2 x i1>
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -116,8 +116,8 @@ define <vscale x 4 x i1> @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1(<vscale x 4 x i1>
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -158,8 +158,8 @@ define <vscale x 8 x i1> @intrinsic_vmsof_mask_m_nxv8i1_nxv8i1(<vscale x 8 x i1>
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv8i1_nxv8i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -200,8 +200,8 @@ define <vscale x 16 x i1> @intrinsic_vmsof_mask_m_nxv16i1_nxv16i1(<vscale x 16 x
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv16i1_nxv16i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -242,8 +242,8 @@ define <vscale x 32 x i1> @intrinsic_vmsof_mask_m_nxv32i1_nxv32i1(<vscale x 32 x
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv32i1_nxv32i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
@@ -284,8 +284,8 @@ define <vscale x 64 x i1> @intrinsic_vmsof_mask_m_nxv64i1_nxv64i1(<vscale x 64 x
; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv64i1_nxv64i1:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv1r.v v10, v0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, tu, mu
; CHECK-NEXT: vmsof.m v10, v8, v0.t
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
index 8b368bfaab08..f7ca65801dc8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-cttz-elts.ll
@@ -6,8 +6,8 @@ define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
; RV32-LABEL: bool_vec:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v9, v0
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vmv1r.v v0, v8
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vfirst.m a1, v9, v0.t
; RV32-NEXT: bltz a1, .LBB0_2
; RV32-NEXT: # %bb.1:
@@ -20,8 +20,8 @@ define iXLen @bool_vec(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m, i32 %evl) {
; RV64-NEXT: vmv1r.v v9, v0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vmv1r.v v0, v8
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vfirst.m a1, v9, v0.t
; RV64-NEXT: bltz a1, .LBB0_2
; RV64-NEXT: # %bb.1:
@@ -36,8 +36,8 @@ define iXLen @bool_vec_zero_poison(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m,
; RV32-LABEL: bool_vec_zero_poison:
; RV32: # %bb.0:
; RV32-NEXT: vmv1r.v v9, v0
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vmv1r.v v0, v8
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vfirst.m a0, v9, v0.t
; RV32-NEXT: ret
;
@@ -46,8 +46,8 @@ define iXLen @bool_vec_zero_poison(<vscale x 2 x i1> %src, <vscale x 2 x i1> %m,
; RV64-NEXT: vmv1r.v v9, v0
; RV64-NEXT: slli a0, a0, 32
; RV64-NEXT: srli a0, a0, 32
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vmv1r.v v0, v8
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vfirst.m a0, v9, v0.t
; RV64-NEXT: ret
%r = call iXLen @llvm.vp.cttz.elts.iXLen.nxv2i1(<vscale x 2 x i1> %src, i1 1, <vscale x 2 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
index 8b1660283cb7..d0f2ce1ca800 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-int.ll
@@ -450,14 +450,14 @@ define <vscale x 64 x i8> @test_vp_reverse_nxv64i8_masked(<vscale x 64 x i8> %sr
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16
-; CHECK-NEXT: vrsub.vx v24, v16, a2
+; CHECK-NEXT: vrsub.vx v16, v16, a2
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v20, v8, v24
-; CHECK-NEXT: vrgatherei16.vv v16, v12, v24
+; CHECK-NEXT: vrgatherei16.vv v28, v8, v16
+; CHECK-NEXT: vrgatherei16.vv v24, v12, v16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub a1, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vslidedown.vx v8, v16, a1, v0.t
+; CHECK-NEXT: vslidedown.vx v8, v24, a1, v0.t
; CHECK-NEXT: ret
%dst = call <vscale x 64 x i8> @llvm.experimental.vp.reverse.nxv64i8(<vscale x 64 x i8> %src, <vscale x 64 x i1> %mask, i32 %evl)
ret <vscale x 64 x i8> %dst
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll
index a30ebf2d33b5..7f81b99eb033 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask-fixed-vectors.ll
@@ -8,8 +8,8 @@ define <2 x i1> @test_vp_reverse_v2i1_masked(<2 x i1> %src, <2 x i1> %mask, i32
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -45,8 +45,8 @@ define <4 x i1> @test_vp_reverse_v4i1_masked(<4 x i1> %src, <4 x i1> %mask, i32
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -82,8 +82,8 @@ define <8 x i1> @test_vp_reverse_v8i1_masked(<8 x i1> %src, <8 x i1> %mask, i32
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -119,8 +119,8 @@ define <16 x i1> @test_vp_reverse_v16i1_masked(<16 x i1> %src, <16 x i1> %mask,
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll
index ceb6a164e20d..acf7d16bda98 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-reverse-mask.ll
@@ -7,8 +7,8 @@ define <vscale x 1 x i1> @test_vp_reverse_nxv1i1_masked(<vscale x 1 x i1> %src,
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -44,8 +44,8 @@ define <vscale x 2 x i1> @test_vp_reverse_nxv2i1_masked(<vscale x 2 x i1> %src,
; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -81,8 +81,8 @@ define <vscale x 4 x i1> @test_vp_reverse_nxv4i1_masked(<vscale x 4 x i1> %src,
; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -118,8 +118,8 @@ define <vscale x 8 x i1> @test_vp_reverse_nxv8i1_masked(<vscale x 8 x i1> %src,
; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv.v.i v9, 0
; CHECK-NEXT: vmerge.vim v9, v9, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vid.v v10, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v10, v10, a0, v0.t
@@ -155,8 +155,8 @@ define <vscale x 16 x i1> @test_vp_reverse_nxv16i1_masked(<vscale x 16 x i1> %sr
; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmv.v.i v10, 0
; CHECK-NEXT: vmerge.vim v10, v10, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m4, ta, ma
; CHECK-NEXT: vid.v v12, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v12, v12, a0, v0.t
@@ -193,8 +193,8 @@ define <vscale x 32 x i1> @test_vp_reverse_nxv32i1_masked(<vscale x 32 x i1> %sr
; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmv.v.i v12, 0
; CHECK-NEXT: vmerge.vim v12, v12, 1, v0
-; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, zero, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16, v0.t
; CHECK-NEXT: addi a0, a0, -1
; CHECK-NEXT: vrsub.vx v16, v16, a0, v0.t
@@ -242,8 +242,8 @@ define <vscale x 64 x i1> @test_vp_reverse_nxv64i1_masked(<vscale x 64 x i1> %sr
; CHECK-NEXT: vrgatherei16.vv v16, v28, v0
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub a1, a1, a0
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vx v16, v16, a1, v0.t
; CHECK-NEXT: vmsne.vi v8, v16, 0, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
@@ -263,14 +263,14 @@ define <vscale x 64 x i1> @test_vp_reverse_nxv64i1(<vscale x 64 x i1> %src, i32
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: vsetvli a3, zero, e16, m8, ta, ma
; CHECK-NEXT: vid.v v16
-; CHECK-NEXT: vrsub.vx v24, v16, a2
+; CHECK-NEXT: vrsub.vx v16, v16, a2
; CHECK-NEXT: vsetvli zero, zero, e8, m4, ta, ma
-; CHECK-NEXT: vrgatherei16.vv v20, v8, v24
-; CHECK-NEXT: vrgatherei16.vv v16, v12, v24
+; CHECK-NEXT: vrgatherei16.vv v28, v8, v16
+; CHECK-NEXT: vrgatherei16.vv v24, v12, v16
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: sub a1, a1, a0
; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vslidedown.vx v8, v16, a1
+; CHECK-NEXT: vslidedown.vx v8, v24, a1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
index ce0ae2022885..9496cd82947d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-fixed-vectors.ll
@@ -68,8 +68,8 @@ define <2 x i1> @test_vp_splice_v2i1_masked(<2 x i1> %va, <2 x i1> %vb, <2 x i1>
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -141,8 +141,8 @@ define <4 x i1> @test_vp_splice_v4i1_masked(<4 x i1> %va, <4 x i1> %vb, <4 x i1>
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -214,8 +214,8 @@ define <8 x i1> @test_vp_splice_v8i1_masked(<8 x i1> %va, <8 x i1> %vb, <8 x i1>
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -287,8 +287,8 @@ define <16 x i1> @test_vp_splice_v16i1_masked(<16 x i1> %va, <16 x i1> %vb, <16
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
index 668cff234293..902763082522 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vp-splice-mask-vectors.ll
@@ -71,8 +71,8 @@ define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <v
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -144,8 +144,8 @@ define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <v
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -217,8 +217,8 @@ define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <v
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -290,8 +290,8 @@ define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <v
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v11, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT: vslideup.vx v10, v8, a0, v0.t
@@ -363,8 +363,8 @@ define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va,
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v10, v14, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT: vslideup.vx v10, v12, a0, v0.t
@@ -437,8 +437,8 @@ define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va,
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v16, v16, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vslidedown.vi v16, v16, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT: vslideup.vx v16, v12, a0, v0.t
@@ -511,8 +511,8 @@ define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va,
; CHECK-NEXT: vmv1r.v v0, v10
; CHECK-NEXT: vmerge.vim v24, v24, 1, v0
; CHECK-NEXT: addi a0, a0, -5
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vslidedown.vi v24, v24, 5, v0.t
; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT: vslideup.vx v24, v16, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index c86fee630593..c0d7ecf74956 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -274,8 +274,8 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
; RV32-NEXT: .LBB12_2:
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v8
-; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
@@ -308,8 +308,8 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
; RV64-NEXT: .LBB12_2:
; RV64-NEXT: vsetvli a6, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v10
-; RV64-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v0, v13
+; RV64-NEXT: vsetvli zero, a5, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v10, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a3, .LBB12_4
; RV64-NEXT: # %bb.3:
@@ -331,8 +331,8 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
; RV64-NEXT: .LBB12_6:
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
-; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, ptr %base, <vscale x 32 x i8> %idxs
@@ -2269,18 +2269,18 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
; RV32-NEXT: sub a2, a0, a1
; RV32-NEXT: sltu a3, a0, a2
; RV32-NEXT: addi a3, a3, -1
+; RV32-NEXT: srli a4, a1, 3
+; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; RV32-NEXT: vslidedown.vx v0, v0, a4
; RV32-NEXT: and a2, a3, a2
-; RV32-NEXT: srli a3, a1, 3
-; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
-; RV32-NEXT: vslidedown.vx v0, v0, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v12, v0.t
; RV32-NEXT: bltu a0, a1, .LBB102_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB102_2:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v24, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v24
; RV32-NEXT: ret
@@ -2292,18 +2292,18 @@ define <vscale x 16 x double> @vpgather_nxv16f64(<vscale x 16 x ptr> %ptrs, <vsc
; RV64-NEXT: sub a2, a0, a1
; RV64-NEXT: sltu a3, a0, a2
; RV64-NEXT: addi a3, a3, -1
+; RV64-NEXT: srli a4, a1, 3
+; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; RV64-NEXT: vslidedown.vx v0, v0, a4
; RV64-NEXT: and a2, a3, a2
-; RV64-NEXT: srli a3, a1, 3
-; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
-; RV64-NEXT: vslidedown.vx v0, v0, a3
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t
; RV64-NEXT: bltu a0, a1, .LBB102_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB102_2:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 16 x double> @llvm.vp.gather.nxv16f64.nxv16p0(<vscale x 16 x ptr> %ptrs, <vscale x 16 x i1> %m, i32 %evl)
@@ -2319,20 +2319,20 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
; RV32-NEXT: vsll.vi v24, v16, 3
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: srli a4, a2, 3
; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a4
+; RV32-NEXT: sltu a4, a1, a3
+; RV32-NEXT: addi a4, a4, -1
+; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB103_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB103_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
;
@@ -2340,26 +2340,27 @@ define <vscale x 16 x double> @vpgather_baseidx_nxv16i16_nxv16f64(ptr %base, <vs
; RV64: # %bb.0:
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
-; RV64-NEXT: vsext.vf4 v16, v8
-; RV64-NEXT: vsll.vi v24, v16, 3
; RV64-NEXT: vsext.vf4 v16, v10
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: sub a3, a1, a2
-; RV64-NEXT: sltu a4, a1, a3
-; RV64-NEXT: addi a4, a4, -1
-; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: srli a4, a2, 3
; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a4
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
+; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf4 v24, v8
+; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: bltu a1, a2, .LBB103_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB103_2:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, ptr %base, <vscale x 16 x i16> %idxs
@@ -2376,20 +2377,20 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
; RV32-NEXT: vsll.vi v24, v16, 3
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: srli a4, a2, 3
; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a4
+; RV32-NEXT: sltu a4, a1, a3
+; RV32-NEXT: addi a4, a4, -1
+; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB104_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB104_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
;
@@ -2398,25 +2399,26 @@ define <vscale x 16 x double> @vpgather_baseidx_sext_nxv16i16_nxv16f64(ptr %base
; RV64-NEXT: vmv1r.v v12, v0
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v10
-; RV64-NEXT: vsext.vf4 v24, v8
-; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsll.vi v16, v16, 3
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: sub a3, a1, a2
-; RV64-NEXT: sltu a4, a1, a3
-; RV64-NEXT: addi a4, a4, -1
-; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: srli a4, a2, 3
; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a4
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
+; RV64-NEXT: vsetvli a3, zero, e64, m8, ta, ma
+; RV64-NEXT: vsext.vf4 v24, v8
+; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: bltu a1, a2, .LBB104_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB104_2:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 16 x i16> %idxs to <vscale x 16 x i64>
@@ -2434,20 +2436,20 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
; RV32-NEXT: vsll.vi v24, v16, 3
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: sub a3, a1, a2
-; RV32-NEXT: sltu a4, a1, a3
-; RV32-NEXT: addi a4, a4, -1
-; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: srli a4, a2, 3
; RV32-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a4
+; RV32-NEXT: sltu a4, a1, a3
+; RV32-NEXT: addi a4, a4, -1
+; RV32-NEXT: and a3, a4, a3
; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB105_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB105_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
;
@@ -2459,20 +2461,20 @@ define <vscale x 16 x double> @vpgather_baseidx_zext_nxv16i16_nxv16f64(ptr %base
; RV64-NEXT: vsll.vi v24, v16, 3
; RV64-NEXT: csrr a2, vlenb
; RV64-NEXT: sub a3, a1, a2
-; RV64-NEXT: sltu a4, a1, a3
-; RV64-NEXT: addi a4, a4, -1
-; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: srli a4, a2, 3
; RV64-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a4
+; RV64-NEXT: sltu a4, a1, a3
+; RV64-NEXT: addi a4, a4, -1
+; RV64-NEXT: and a3, a4, a3
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV64-NEXT: bltu a1, a2, .LBB105_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB105_2:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 16 x i16> %idxs to <vscale x 16 x i64>
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpload.ll b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
index f07c16476c56..1b1e9153a2fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpload.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpload.ll
@@ -444,18 +444,18 @@ define <vscale x 16 x double> @vpload_nxv16f64(ptr %ptr, <vscale x 16 x i1> %m,
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a3, a4, a3
; CHECK-NEXT: slli a4, a2, 3
-; CHECK-NEXT: add a4, a0, a4
; CHECK-NEXT: srli a5, a2, 3
; CHECK-NEXT: vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a5
+; CHECK-NEXT: add a4, a0, a4
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a4), v0.t
; CHECK-NEXT: bltu a1, a2, .LBB37_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB37_2:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x double> @llvm.vp.load.nxv16f64.p0(ptr %ptr, <vscale x 16 x i1> %m, i32 %evl)
@@ -489,10 +489,10 @@ define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17
; CHECK-NEXT: addi a7, a7, -1
; CHECK-NEXT: and a6, a7, a6
; CHECK-NEXT: slli a7, a3, 3
-; CHECK-NEXT: add a7, a0, a7
; CHECK-NEXT: srli t0, a3, 3
; CHECK-NEXT: vsetvli t1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v8, t0
+; CHECK-NEXT: add a7, a0, a7
; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a7), v0.t
; CHECK-NEXT: sub a5, a2, a5
@@ -504,18 +504,18 @@ define <vscale x 16 x double> @vpload_nxv17f64(ptr %ptr, ptr %out, <vscale x 17
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB38_4:
; CHECK-NEXT: slli a5, a3, 4
-; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: srli a6, a3, 2
; CHECK-NEXT: vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v8, a6
+; CHECK-NEXT: add a5, a0, a5
; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a5), v0.t
; CHECK-NEXT: bltu a4, a3, .LBB38_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB38_6:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: vs1r.v v24, (a1)
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
index 26f7c56f05ce..094e6c9cc754 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpmerge-sdnode.ll
@@ -373,8 +373,8 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: vlm.v v0, (a2)
; CHECK-NEXT: sub a2, a3, a1
; CHECK-NEXT: sltu a4, a3, a2
-; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: vl8r.v v8, (a0)
+; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a2, a4, a2
; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vvm v16, v16, v24, v0
@@ -382,10 +382,10 @@ define <vscale x 128 x i8> @vpmerge_vv_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a1
; CHECK-NEXT: .LBB28_2:
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -414,8 +414,8 @@ define <vscale x 128 x i8> @vpmerge_vx_nxv128i8(i8 %a, <vscale x 128 x i8> %vb,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB29_2:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vxm v8, v8, a0, v0
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 128 x i8> poison, i8 %a, i32 0
@@ -442,8 +442,8 @@ define <vscale x 128 x i8> @vpmerge_vi_nxv128i8(<vscale x 128 x i8> %vb, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB30_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, tu, ma
; CHECK-NEXT: vmerge.vim v8, v8, 2, v0
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.merge.nxv128i8(<vscale x 128 x i1> %m, <vscale x 128 x i8> splat (i8 2), <vscale x 128 x i8> %vb, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
index 351fc500145e..59662db42898 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpscatter-sdnode.ll
@@ -2124,10 +2124,10 @@ define void @vpscatter_nxv16f64(<vscale x 16 x double> %val, <vscale x 16 x ptr>
; RV32-NEXT: sub a2, a1, a0
; RV32-NEXT: sltu a1, a1, a2
; RV32-NEXT: addi a1, a1, -1
-; RV32-NEXT: and a1, a1, a2
; RV32-NEXT: srli a0, a0, 3
-; RV32-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a0
+; RV32-NEXT: and a1, a1, a2
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (zero), v28, v0.t
; RV32-NEXT: ret
@@ -2157,13 +2157,13 @@ define void @vpscatter_nxv16f64(<vscale x 16 x double> %val, <vscale x 16 x ptr>
; RV64-NEXT: sub a0, a2, a1
; RV64-NEXT: sltu a2, a2, a0
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a0, a2, a0
; RV64-NEXT: srli a1, a1, 3
-; RV64-NEXT: vsetvli a2, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
+; RV64-NEXT: and a0, a2, a0
+; RV64-NEXT: addi a1, sp, 16
+; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; RV64-NEXT: addi a0, sp, 16
-; RV64-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
@@ -2192,10 +2192,10 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
; RV32-NEXT: sub a3, a2, a1
; RV32-NEXT: sltu a2, a2, a3
; RV32-NEXT: addi a2, a2, -1
-; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: srli a1, a1, 3
-; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
@@ -2232,11 +2232,10 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
; RV64-NEXT: sub a3, a2, a1
; RV64-NEXT: sltu a2, a2, a3
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: srli a1, a1, 3
-; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a1, sp, a1
@@ -2244,6 +2243,7 @@ define void @vpscatter_baseidx_nxv16i16_nxv16f64(<vscale x 16 x double> %val, pt
; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
; RV64-NEXT: addi a1, sp, 16
; RV64-NEXT: vl8r.v v16, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
@@ -2273,10 +2273,10 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV32-NEXT: sub a3, a2, a1
; RV32-NEXT: sltu a2, a2, a3
; RV32-NEXT: addi a2, a2, -1
-; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: srli a1, a1, 3
-; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
@@ -2308,22 +2308,22 @@ define void @vpscatter_baseidx_sext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a3, a1
; RV64-NEXT: .LBB101_2:
+; RV64-NEXT: addi a4, sp, 16
+; RV64-NEXT: vl1r.v v0, (a4) # Unknown-size Folded Reload
; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
-; RV64-NEXT: addi a3, sp, 16
-; RV64-NEXT: vl1r.v v0, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vsoxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: sub a3, a2, a1
; RV64-NEXT: sltu a2, a2, a3
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: srli a1, a1, 3
-; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
+; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: add a1, sp, a1
; RV64-NEXT: addi a1, a1, 16
; RV64-NEXT: vl8r.v v8, (a1) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vsoxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: li a1, 10
@@ -2355,10 +2355,10 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV32-NEXT: sub a3, a2, a1
; RV32-NEXT: sltu a2, a2, a3
; RV32-NEXT: addi a2, a2, -1
-; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: srli a1, a1, 3
-; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV32-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vx v0, v0, a1
+; RV32-NEXT: and a2, a2, a3
; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: ret
@@ -2380,10 +2380,10 @@ define void @vpscatter_baseidx_zext_nxv16i16_nxv16f64(<vscale x 16 x double> %va
; RV64-NEXT: sub a3, a2, a1
; RV64-NEXT: sltu a2, a2, a3
; RV64-NEXT: addi a2, a2, -1
-; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: srli a1, a1, 3
-; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; RV64-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vx v0, v0, a1
+; RV64-NEXT: and a2, a2, a3
; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vsoxei32.v v16, (a0), v28, v0.t
; RV64-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
index c12fc0497742..ce0ee38bc704 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpstore.ll
@@ -380,10 +380,10 @@ define void @vpstore_nxv16f64(<vscale x 16 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: addi a1, a1, -1
; CHECK-NEXT: and a1, a1, a3
; CHECK-NEXT: slli a3, a2, 3
-; CHECK-NEXT: add a0, a0, a3
; CHECK-NEXT: srli a2, a2, 3
-; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vsetvli a4, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v0, a2
+; CHECK-NEXT: add a0, a0, a3
; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v16, (a0), v0.t
; CHECK-NEXT: ret
@@ -420,36 +420,36 @@ define void @vpstore_nxv17f64(<vscale x 17 x double> %val, ptr %ptr, <vscale x 1
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT: vse64.v v8, (a1), v0.t
; CHECK-NEXT: sub a0, a5, a3
; CHECK-NEXT: sltu a5, a5, a0
; CHECK-NEXT: addi a5, a5, -1
-; CHECK-NEXT: and a0, a5, a0
-; CHECK-NEXT: slli a5, a3, 3
-; CHECK-NEXT: add a5, a1, a5
-; CHECK-NEXT: srli a6, a3, 3
+; CHECK-NEXT: and a5, a5, a0
+; CHECK-NEXT: slli a0, a3, 3
+; CHECK-NEXT: add a6, a1, a0
+; CHECK-NEXT: srli a0, a3, 3
; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v24, a6
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v24, a0
; CHECK-NEXT: sub a0, a2, a4
; CHECK-NEXT: sltu a2, a2, a0
; CHECK-NEXT: addi a2, a2, -1
; CHECK-NEXT: and a0, a2, a0
-; CHECK-NEXT: vse64.v v16, (a5), v0.t
+; CHECK-NEXT: vsetvli zero, a5, e64, m8, ta, ma
+; CHECK-NEXT: vse64.v v16, (a6), v0.t
; CHECK-NEXT: bltu a0, a3, .LBB31_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a0, a3
; CHECK-NEXT: .LBB31_6:
; CHECK-NEXT: slli a2, a3, 4
-; CHECK-NEXT: add a1, a1, a2
; CHECK-NEXT: srli a3, a3, 2
-; CHECK-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vsetvli a4, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v24, a3
+; CHECK-NEXT: add a1, a1, a2
+; CHECK-NEXT: addi a2, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vse64.v v8, (a1), v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
index 6d42b15273cf..fa7830133834 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-sdnode.ll
@@ -1018,11 +1018,11 @@ declare half @llvm.vector.reduce.fmin.nxv10f16(<vscale x 10 x half>)
define half @vreduce_fmin_nxv10f16(<vscale x 10 x half> %v) {
; CHECK-LABEL: vreduce_fmin_nxv10f16:
; CHECK: # %bb.0:
+; CHECK-NEXT: lui a0, %hi(.LCPI73_0)
+; CHECK-NEXT: addi a0, a0, %lo(.LCPI73_0)
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
+; CHECK-NEXT: vlse16.v v12, (a0), zero
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: lui a1, %hi(.LCPI73_0)
-; CHECK-NEXT: addi a1, a1, %lo(.LCPI73_0)
-; CHECK-NEXT: vsetvli a2, zero, e16, m1, ta, ma
-; CHECK-NEXT: vlse16.v v12, (a1), zero
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a0
; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
index 46560fc501c6..f21b42e9519b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-fp-vp.ll
@@ -192,8 +192,8 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFH-NEXT: vfmv.s.f v25, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfredusum.vs v25, v8, v25, v0.t
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v24
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; ZVFH-NEXT: vfredusum.vs v25, v16, v25, v0.t
; ZVFH-NEXT: vfmv.f.s fa0, v25
; ZVFH-NEXT: ret
@@ -247,8 +247,8 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFHMIN-NEXT: vfmv.s.f v8, fa5
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v6
+; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfredusum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
@@ -257,8 +257,8 @@ define half @vpreduce_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscale x
; ZVFHMIN-NEXT: vfmv.s.f v8, fa5
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfredusum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
@@ -298,8 +298,8 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFH-NEXT: vfmv.s.f v25, fa0
; ZVFH-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; ZVFH-NEXT: vfredosum.vs v25, v8, v25, v0.t
-; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; ZVFH-NEXT: vmv1r.v v0, v24
+; ZVFH-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; ZVFH-NEXT: vfredosum.vs v25, v16, v25, v0.t
; ZVFH-NEXT: vfmv.f.s fa0, v25
; ZVFH-NEXT: ret
@@ -353,8 +353,8 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFHMIN-NEXT: vfmv.s.f v8, fa5
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v12
-; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v6
+; ZVFHMIN-NEXT: vsetvli zero, a5, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfredosum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
@@ -363,8 +363,8 @@ define half @vpreduce_ord_fadd_nxv64f16(half %s, <vscale x 64 x half> %v, <vscal
; ZVFHMIN-NEXT: vfmv.s.f v8, fa5
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfwcvt.f.f.v v24, v16
-; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfredosum.vs v8, v24, v8, v0.t
; ZVFHMIN-NEXT: vfmv.f.s fa5, v8
; ZVFHMIN-NEXT: fcvt.h.s fa5, fa5
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
index 7bcf37b1af3c..c41ddaee75a8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-int-vp.ll
@@ -1115,8 +1115,8 @@ define signext i32 @vpreduce_umax_nxv32i32(i32 signext %s, <vscale x 32 x i32> %
; CHECK-NEXT: vmv.s.x v25, a0
; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v25, v8, v25, v0.t
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vredmaxu.vs v25, v16, v25, v0.t
; CHECK-NEXT: vmv.x.s a0, v25
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
index 94ed7e568a01..39666bb6119a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vreductions-mask-vp.ll
@@ -24,8 +24,8 @@ define zeroext i1 @vpreduce_or_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vsca
; CHECK-LABEL: vpreduce_or_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -40,8 +40,8 @@ define zeroext i1 @vpreduce_xor_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_xor_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -72,8 +72,8 @@ define zeroext i1 @vpreduce_or_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vsca
; CHECK-LABEL: vpreduce_or_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -88,8 +88,8 @@ define zeroext i1 @vpreduce_xor_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_xor_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -120,8 +120,8 @@ define zeroext i1 @vpreduce_or_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vsca
; CHECK-LABEL: vpreduce_or_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -136,8 +136,8 @@ define zeroext i1 @vpreduce_xor_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_xor_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -168,8 +168,8 @@ define zeroext i1 @vpreduce_or_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vsca
; CHECK-LABEL: vpreduce_or_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -184,8 +184,8 @@ define zeroext i1 @vpreduce_xor_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_xor_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -216,8 +216,8 @@ define zeroext i1 @vpreduce_or_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <vs
; CHECK-LABEL: vpreduce_or_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -232,8 +232,8 @@ define zeroext i1 @vpreduce_xor_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <v
; CHECK-LABEL: vpreduce_xor_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -264,8 +264,8 @@ define zeroext i1 @vpreduce_or_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <vs
; CHECK-LABEL: vpreduce_or_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -280,8 +280,8 @@ define zeroext i1 @vpreduce_xor_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <v
; CHECK-LABEL: vpreduce_xor_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -296,8 +296,8 @@ define zeroext i1 @vpreduce_or_nxv40i1(i1 zeroext %s, <vscale x 40 x i1> %v, <vs
; CHECK-LABEL: vpreduce_or_nxv40i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -328,8 +328,8 @@ define zeroext i1 @vpreduce_or_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <vs
; CHECK-LABEL: vpreduce_or_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -344,8 +344,8 @@ define zeroext i1 @vpreduce_xor_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <v
; CHECK-LABEL: vpreduce_xor_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -366,16 +366,16 @@ define zeroext i1 @vpreduce_or_nxv128i1(i1 zeroext %s, <vscale x 128 x i1> %v, <
; CHECK-NEXT: sltu a4, a1, a3
; CHECK-NEXT: addi a4, a4, -1
; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a3, v8, v0.t
; CHECK-NEXT: snez a3, a3
; CHECK-NEXT: bltu a1, a2, .LBB22_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v9
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v11, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -391,8 +391,8 @@ define zeroext i1 @vpreduce_add_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_add_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -407,8 +407,8 @@ define zeroext i1 @vpreduce_add_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_add_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -423,8 +423,8 @@ define zeroext i1 @vpreduce_add_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_add_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -439,8 +439,8 @@ define zeroext i1 @vpreduce_add_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vsc
; CHECK-LABEL: vpreduce_add_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -455,8 +455,8 @@ define zeroext i1 @vpreduce_add_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <v
; CHECK-LABEL: vpreduce_add_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -471,8 +471,8 @@ define zeroext i1 @vpreduce_add_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <v
; CHECK-LABEL: vpreduce_add_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -487,8 +487,8 @@ define zeroext i1 @vpreduce_add_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <v
; CHECK-LABEL: vpreduce_add_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: andi a1, a1, 1
; CHECK-NEXT: xor a0, a1, a0
@@ -616,8 +616,8 @@ define zeroext i1 @vpreduce_smin_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vs
; CHECK-LABEL: vpreduce_smin_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -632,8 +632,8 @@ define zeroext i1 @vpreduce_smin_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vs
; CHECK-LABEL: vpreduce_smin_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -648,8 +648,8 @@ define zeroext i1 @vpreduce_smin_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vs
; CHECK-LABEL: vpreduce_smin_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -664,8 +664,8 @@ define zeroext i1 @vpreduce_smin_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vs
; CHECK-LABEL: vpreduce_smin_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -680,8 +680,8 @@ define zeroext i1 @vpreduce_smin_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <
; CHECK-LABEL: vpreduce_smin_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -696,8 +696,8 @@ define zeroext i1 @vpreduce_smin_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <
; CHECK-LABEL: vpreduce_smin_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -712,8 +712,8 @@ define zeroext i1 @vpreduce_smin_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <
; CHECK-LABEL: vpreduce_smin_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -728,8 +728,8 @@ define zeroext i1 @vpreduce_umax_nxv1i1(i1 zeroext %s, <vscale x 1 x i1> %v, <vs
; CHECK-LABEL: vpreduce_umax_nxv1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -744,8 +744,8 @@ define zeroext i1 @vpreduce_umax_nxv2i1(i1 zeroext %s, <vscale x 2 x i1> %v, <vs
; CHECK-LABEL: vpreduce_umax_nxv2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -760,8 +760,8 @@ define zeroext i1 @vpreduce_umax_nxv4i1(i1 zeroext %s, <vscale x 4 x i1> %v, <vs
; CHECK-LABEL: vpreduce_umax_nxv4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -776,8 +776,8 @@ define zeroext i1 @vpreduce_umax_nxv8i1(i1 zeroext %s, <vscale x 8 x i1> %v, <vs
; CHECK-LABEL: vpreduce_umax_nxv8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -792,8 +792,8 @@ define zeroext i1 @vpreduce_umax_nxv16i1(i1 zeroext %s, <vscale x 16 x i1> %v, <
; CHECK-LABEL: vpreduce_umax_nxv16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -808,8 +808,8 @@ define zeroext i1 @vpreduce_umax_nxv32i1(i1 zeroext %s, <vscale x 32 x i1> %v, <
; CHECK-LABEL: vpreduce_umax_nxv32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
@@ -824,8 +824,8 @@ define zeroext i1 @vpreduce_umax_nxv64i1(i1 zeroext %s, <vscale x 64 x i1> %v, <
; CHECK-LABEL: vpreduce_umax_nxv64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vcpop.m a1, v9, v0.t
; CHECK-NEXT: snez a1, a1
; CHECK-NEXT: or a0, a1, a0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
index 462d49991ae4..e95e9fabe934 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
@@ -12,11 +12,11 @@
define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscale x 1 x i16> %vs12.i.i.i, <vscale x 1 x i16> %1, <vscale x 8 x i8> %v37) {
; NOSUBREG-LABEL: foo:
; NOSUBREG: # %bb.0: # %loopIR.preheader.i.i
-; NOSUBREG-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; NOSUBREG-NEXT: vmv.v.i v14, 0
-; NOSUBREG-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; NOSUBREG-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; NOSUBREG-NEXT: vmv.v.i v9, 0
-; NOSUBREG-NEXT: vmv.v.i v8, 0
+; NOSUBREG-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; NOSUBREG-NEXT: vmv.v.i v14, 0
+; NOSUBREG-NEXT: vmv1r.v v8, v9
; NOSUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; NOSUBREG-NEXT: vrgatherei16.vv v8, v9, v14
; NOSUBREG-NEXT: .LBB0_1: # %loopIR3.i.i
@@ -32,11 +32,11 @@ define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscal
;
; SUBREG-LABEL: foo:
; SUBREG: # %bb.0: # %loopIR.preheader.i.i
-; SUBREG-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; SUBREG-NEXT: vmv.v.i v14, 0
-; SUBREG-NEXT: vsetvli zero, zero, e8, m1, ta, ma
+; SUBREG-NEXT: vsetvli a0, zero, e8, m1, ta, ma
; SUBREG-NEXT: vmv.v.i v9, 0
-; SUBREG-NEXT: vmv.v.i v8, 0
+; SUBREG-NEXT: vsetvli zero, zero, e16, m2, ta, ma
+; SUBREG-NEXT: vmv.v.i v14, 0
+; SUBREG-NEXT: vmv1r.v v8, v9
; SUBREG-NEXT: vsetivli zero, 4, e8, m1, tu, ma
; SUBREG-NEXT: vrgatherei16.vv v8, v9, v14
; SUBREG-NEXT: .LBB0_1: # %loopIR3.i.i
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index f9ea5143cfcb..71b91f56e89a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -587,8 +587,8 @@ define <vscale x 128 x i8> @vsadd_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.sadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
@@ -1366,8 +1366,8 @@ define <vscale x 32 x i32> @vsadd_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.sadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index 745b93b25708..454a4ebab04a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -586,8 +586,8 @@ define <vscale x 128 x i8> @vsaddu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.uadd.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
@@ -1365,8 +1365,8 @@ define <vscale x 32 x i32> @vsaddu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsaddu.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.uadd.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
index 4457c1002acc..53b8e4a78b75 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-fp.ll
@@ -211,12 +211,12 @@ define <vscale x 32 x half> @vfmerge_fv_nxv32f16(<vscale x 32 x half> %va, half
; CHECK-ZVFHMIN: # %bb.0:
; CHECK-ZVFHMIN-NEXT: fcvt.s.h fa5, fa0
; CHECK-ZVFHMIN-NEXT: vsetvli a0, zero, e32, m8, ta, ma
-; CHECK-ZVFHMIN-NEXT: vfmv.v.f v24, fa5
+; CHECK-ZVFHMIN-NEXT: vfmv.v.f v16, fa5
; CHECK-ZVFHMIN-NEXT: vsetvli zero, zero, e16, m4, ta, ma
-; CHECK-ZVFHMIN-NEXT: vfncvt.f.f.w v16, v24
-; CHECK-ZVFHMIN-NEXT: vmv.v.v v20, v16
+; CHECK-ZVFHMIN-NEXT: vfncvt.f.f.w v24, v16
+; CHECK-ZVFHMIN-NEXT: vmv.v.v v28, v24
; CHECK-ZVFHMIN-NEXT: vsetvli a0, zero, e16, m8, ta, ma
-; CHECK-ZVFHMIN-NEXT: vmerge.vvm v8, v8, v16, v0
+; CHECK-ZVFHMIN-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-ZVFHMIN-NEXT: ret
%head = insertelement <vscale x 32 x half> poison, half %b, i32 0
%splat = shufflevector <vscale x 32 x half> %head, <vscale x 32 x half> poison, <vscale x 32 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
index 312378d39373..ee0617c93148 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vselect-vp.ll
@@ -354,11 +354,17 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a3, 24
+; CHECK-NEXT: mul a1, a1, a3
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
@@ -367,35 +373,51 @@ define <vscale x 32 x i32> @select_nxv32i32(<vscale x 32 x i1> %a, <vscale x 32
; CHECK-NEXT: slli a1, a3, 3
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: vl8re32.v v8, (a1)
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
; CHECK-NEXT: slli a1, a3, 1
; CHECK-NEXT: sub a4, a2, a1
; CHECK-NEXT: sltu a5, a2, a4
; CHECK-NEXT: addi a5, a5, -1
-; CHECK-NEXT: and a4, a5, a4
; CHECK-NEXT: srli a3, a3, 2
-; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vl8re32.v v0, (a0)
+; CHECK-NEXT: vl8re32.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v24, a3
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a3
+; CHECK-NEXT: and a4, a5, a4
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: bltu a2, a1, .LBB27_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB27_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: li a1, 24
+; CHECK-NEXT: mul a0, a0, a1
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -411,11 +433,17 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: slli a1, a1, 5
; CHECK-NEXT: sub sp, sp, a1
-; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: slli a1, a1, 3
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: add a1, sp, a1
+; CHECK-NEXT: addi a1, a1, 16
+; CHECK-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a1, a1, a2
; CHECK-NEXT: add a1, sp, a1
; CHECK-NEXT: addi a1, a1, 16
; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill
@@ -424,35 +452,51 @@ define <vscale x 32 x i32> @select_evl_nxv32i32(<vscale x 32 x i1> %a, <vscale x
; CHECK-NEXT: slli a2, a1, 3
; CHECK-NEXT: add a2, a0, a2
; CHECK-NEXT: vl8re32.v v8, (a2)
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: slli a2, a2, 3
+; CHECK-NEXT: add a2, sp, a2
+; CHECK-NEXT: addi a2, a2, 16
+; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; CHECK-NEXT: slli a2, a1, 1
; CHECK-NEXT: sub a3, a1, a2
; CHECK-NEXT: sltu a4, a1, a3
; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: srli a4, a1, 2
-; CHECK-NEXT: vsetvli a5, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vl8re32.v v0, (a0)
+; CHECK-NEXT: srli a5, a1, 2
+; CHECK-NEXT: vl8re32.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v24, a4
+; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; CHECK-NEXT: vsetvli a0, zero, e8, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v0, a5
+; CHECK-NEXT: and a3, a4, a3
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: add a0, sp, a0
+; CHECK-NEXT: addi a0, a0, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: bltu a1, a2, .LBB28_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB28_2:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: li a2, 24
+; CHECK-NEXT: mul a0, a0, a2
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
+; CHECK-NEXT: addi a0, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
+; CHECK-NEXT: vmerge.vvm v8, v8, v24, v0
; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
+; CHECK-NEXT: slli a0, a0, 5
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
@@ -714,20 +758,19 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: sub a3, a2, a1
; CHECK-NEXT: sltu a4, a2, a3
; CHECK-NEXT: addi a4, a4, -1
-; CHECK-NEXT: and a3, a4, a3
-; CHECK-NEXT: srli a4, a1, 3
-; CHECK-NEXT: vsetvli a5, zero, e8, mf4, ta, ma
+; CHECK-NEXT: srli a5, a1, 3
; CHECK-NEXT: vl8re64.v v0, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v0, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vslidedown.vx v0, v24, a4
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vx v0, v24, a5
+; CHECK-NEXT: and a3, a4, a3
; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v16, v8, v16, v0
; CHECK-NEXT: bltu a2, a1, .LBB48_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB48_2:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
@@ -736,6 +779,7 @@ define <vscale x 16 x double> @select_nxv16f64(<vscale x 16 x i1> %a, <vscale x
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmerge.vvm v8, v24, v8, v0
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 9d5ff00fd597..7eb6cacf1ca4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -341,13 +341,13 @@ define <vscale x 1 x double> @test8(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs1r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: call foo
-; CHECK-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: add a0, a0, sp
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl1r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, s0, e64, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v9, v8
; CHECK-NEXT: .LBB6_3: # %if.then
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
index 834e7dd85aea..9b5a1a54ad5d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsext-vp.ll
@@ -167,8 +167,8 @@ define <vscale x 32 x i32> @vsext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscal
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsext.vf4 v24, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index 706876dc3854..ca44a9a64de4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -386,7 +386,15 @@ declare <vscale x 32 x half> @llvm.vp.sitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vsitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v24, v0
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: vmv1r.v v7, v0
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
; ZVFH-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -396,16 +404,22 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFH-NEXT: sltu a3, a0, a2
; ZVFH-NEXT: addi a3, a3, -1
; ZVFH-NEXT: and a2, a3, a2
+; ZVFH-NEXT: addi a3, sp, 16
+; ZVFH-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; ZVFH-NEXT: vfncvt.f.x.w v28, v16, v0.t
+; ZVFH-NEXT: vfncvt.f.x.w v20, v24, v0.t
; ZVFH-NEXT: bltu a0, a1, .LBB25_2
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a0, a1
; ZVFH-NEXT: .LBB25_2:
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT: vmv1r.v v0, v24
-; ZVFH-NEXT: vfncvt.f.x.w v24, v8, v0.t
-; ZVFH-NEXT: vmv8r.v v8, v24
+; ZVFH-NEXT: vfncvt.f.x.w v16, v8, v0.t
+; ZVFH-NEXT: vmv8r.v v8, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: addi sp, sp, 16
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vsitofp_nxv32f16_nxv32i32:
@@ -428,8 +442,8 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB25_2:
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.x.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
@@ -460,8 +474,8 @@ define <vscale x 32 x float> @vsitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.x.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x float> @llvm.vp.sitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
index b56a0f40176c..613b58b0f1b8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ -593,22 +593,22 @@ define <vscale x 128 x i8> @vssub_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale x
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub a0, a1, a2
-; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub a2, a1, a0
+; CHECK-NEXT: sltu a3, a1, a2
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a0
-; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vssub.vx v16, v16, a0, v0.t
-; CHECK-NEXT: bltu a1, a2, .LBB50_2
+; CHECK-NEXT: vssub.vx v16, v16, a2, v0.t
+; CHECK-NEXT: bltu a1, a0, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vssub.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a2, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.ssub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
ret <vscale x 128 x i8> %v
@@ -1393,25 +1393,25 @@ define <vscale x 32 x i32> @vssub_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale x
; CHECK-LABEL: vssub_vi_nxv32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: srli a1, a2, 2
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: slli a2, a2, 1
-; CHECK-NEXT: sub a1, a0, a2
-; CHECK-NEXT: sltu a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a1
-; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: vssub.vx v16, v16, a1, v0.t
-; CHECK-NEXT: bltu a0, a2, .LBB118_2
+; CHECK-NEXT: vssub.vx v16, v16, a2, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB118_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vssub.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssub.vx v8, v8, a2, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.ssub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i32> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index 8275c3081c7c..8c729d7d9bfb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -591,22 +591,22 @@ define <vscale x 128 x i8> @vssubu_vi_nxv128i8(<vscale x 128 x i8> %va, <vscale
; CHECK-NEXT: vmv1r.v v24, v0
; CHECK-NEXT: vsetvli a2, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v0, (a0)
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: slli a2, a2, 3
-; CHECK-NEXT: sub a0, a1, a2
-; CHECK-NEXT: sltu a3, a1, a0
+; CHECK-NEXT: csrr a0, vlenb
+; CHECK-NEXT: slli a0, a0, 3
+; CHECK-NEXT: sub a2, a1, a0
+; CHECK-NEXT: sltu a3, a1, a2
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a0
-; CHECK-NEXT: li a0, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v16, v16, a0, v0.t
-; CHECK-NEXT: bltu a1, a2, .LBB50_2
+; CHECK-NEXT: vssubu.vx v16, v16, a2, v0.t
+; CHECK-NEXT: bltu a1, a0, .LBB50_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a1, a2
+; CHECK-NEXT: mv a1, a0
; CHECK-NEXT: .LBB50_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vssubu.vx v8, v8, a0, v0.t
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a2, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 128 x i8> @llvm.vp.usub.sat.nxv128i8(<vscale x 128 x i8> %va, <vscale x 128 x i8> splat (i8 -1), <vscale x 128 x i1> %m, i32 %evl)
ret <vscale x 128 x i8> %v
@@ -1391,25 +1391,25 @@ define <vscale x 32 x i32> @vssubu_vi_nxv32i32(<vscale x 32 x i32> %va, <vscale
; CHECK-LABEL: vssubu_vi_nxv32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v24, v0
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: srli a1, a2, 2
+; CHECK-NEXT: csrr a1, vlenb
+; CHECK-NEXT: srli a2, a1, 2
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vx v0, v0, a1
-; CHECK-NEXT: slli a2, a2, 1
-; CHECK-NEXT: sub a1, a0, a2
-; CHECK-NEXT: sltu a3, a0, a1
+; CHECK-NEXT: vslidedown.vx v0, v0, a2
+; CHECK-NEXT: slli a1, a1, 1
+; CHECK-NEXT: sub a2, a0, a1
+; CHECK-NEXT: sltu a3, a0, a2
; CHECK-NEXT: addi a3, a3, -1
-; CHECK-NEXT: and a3, a3, a1
-; CHECK-NEXT: li a1, -1
+; CHECK-NEXT: and a3, a3, a2
+; CHECK-NEXT: li a2, -1
; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
-; CHECK-NEXT: vssubu.vx v16, v16, a1, v0.t
-; CHECK-NEXT: bltu a0, a2, .LBB118_2
+; CHECK-NEXT: vssubu.vx v16, v16, a2, v0.t
+; CHECK-NEXT: bltu a0, a1, .LBB118_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: mv a0, a2
+; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
-; CHECK-NEXT: vssubu.vx v8, v8, a1, v0.t
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
+; CHECK-NEXT: vssubu.vx v8, v8, a2, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i32> @llvm.vp.usub.sat.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i32> splat (i32 -1), <vscale x 32 x i1> %m, i32 %evl)
ret <vscale x 32 x i32> %v
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index 4857810e7a17..27755c166cc5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -174,8 +174,8 @@ define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <vs
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
@@ -232,8 +232,8 @@ define <vscale x 32 x i7> @vtrunc_nxv32i7_nxv32i32(<vscale x 32 x i32> %a, <vsca
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB15_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
@@ -266,8 +266,8 @@ define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vsca
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB16_2:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v20, 0, v0.t
@@ -312,11 +312,11 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: sub a6, a5, a1
; CHECK-NEXT: sltu a7, a5, a6
; CHECK-NEXT: addi a7, a7, -1
-; CHECK-NEXT: and a6, a7, a6
-; CHECK-NEXT: vsetvli a7, zero, e8, mf4, ta, ma
; CHECK-NEXT: vl8re64.v v24, (a0)
+; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v0, v16, a3
-; CHECK-NEXT: vsetvli zero, a6, e32, m4, ta, ma
+; CHECK-NEXT: and a0, a7, a6
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v20, v8, 0, v0.t
; CHECK-NEXT: bltu a5, a1, .LBB17_2
; CHECK-NEXT: # %bb.1:
@@ -324,8 +324,8 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: .LBB17_2:
; CHECK-NEXT: vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vx v6, v7, a3
-; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v16
+; CHECK-NEXT: vsetvli zero, a5, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v16, v24, 0, v0.t
; CHECK-NEXT: bltu a2, a4, .LBB17_4
; CHECK-NEXT: # %bb.3:
@@ -335,22 +335,22 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
; CHECK-NEXT: sltu a3, a2, a0
; CHECK-NEXT: addi a3, a3, -1
; CHECK-NEXT: and a0, a3, a0
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v6
-; CHECK-NEXT: addi a0, sp, 16
-; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: addi a3, sp, 16
+; CHECK-NEXT: vl8r.v v8, (a3) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v28, v8, 0, v0.t
; CHECK-NEXT: bltu a2, a1, .LBB17_6
; CHECK-NEXT: # %bb.5:
; CHECK-NEXT: mv a2, a1
; CHECK-NEXT: .LBB17_6:
-; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vmv1r.v v0, v7
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 3
; CHECK-NEXT: add a0, sp, a0
; CHECK-NEXT: addi a0, a0, 16
; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT: vnsrl.wi v24, v8, 0, v0.t
; CHECK-NEXT: vmv8r.v v8, v24
; CHECK-NEXT: csrr a0, vlenb
diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index e083d594db25..e5941dc7b5ab 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -386,7 +386,15 @@ declare <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; ZVFH-LABEL: vuitofp_nxv32f16_nxv32i32:
; ZVFH: # %bb.0:
-; ZVFH-NEXT: vmv1r.v v24, v0
+; ZVFH-NEXT: addi sp, sp, -16
+; ZVFH-NEXT: .cfi_def_cfa_offset 16
+; ZVFH-NEXT: csrr a1, vlenb
+; ZVFH-NEXT: slli a1, a1, 3
+; ZVFH-NEXT: sub sp, sp, a1
+; ZVFH-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
+; ZVFH-NEXT: vmv1r.v v7, v0
+; ZVFH-NEXT: addi a1, sp, 16
+; ZVFH-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; ZVFH-NEXT: csrr a1, vlenb
; ZVFH-NEXT: srli a2, a1, 2
; ZVFH-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
@@ -396,16 +404,22 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFH-NEXT: sltu a3, a0, a2
; ZVFH-NEXT: addi a3, a3, -1
; ZVFH-NEXT: and a2, a3, a2
+; ZVFH-NEXT: addi a3, sp, 16
+; ZVFH-NEXT: vl8r.v v24, (a3) # Unknown-size Folded Reload
; ZVFH-NEXT: vsetvli zero, a2, e16, m4, ta, ma
-; ZVFH-NEXT: vfncvt.f.xu.w v28, v16, v0.t
+; ZVFH-NEXT: vfncvt.f.xu.w v20, v24, v0.t
; ZVFH-NEXT: bltu a0, a1, .LBB25_2
; ZVFH-NEXT: # %bb.1:
; ZVFH-NEXT: mv a0, a1
; ZVFH-NEXT: .LBB25_2:
+; ZVFH-NEXT: vmv1r.v v0, v7
; ZVFH-NEXT: vsetvli zero, a0, e16, m4, ta, ma
-; ZVFH-NEXT: vmv1r.v v0, v24
-; ZVFH-NEXT: vfncvt.f.xu.w v24, v8, v0.t
-; ZVFH-NEXT: vmv8r.v v8, v24
+; ZVFH-NEXT: vfncvt.f.xu.w v16, v8, v0.t
+; ZVFH-NEXT: vmv8r.v v8, v16
+; ZVFH-NEXT: csrr a0, vlenb
+; ZVFH-NEXT: slli a0, a0, 3
+; ZVFH-NEXT: add sp, sp, a0
+; ZVFH-NEXT: addi sp, sp, 16
; ZVFH-NEXT: ret
;
; ZVFHMIN-LABEL: vuitofp_nxv32f16_nxv32i32:
@@ -428,8 +442,8 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
; ZVFHMIN-NEXT: # %bb.1:
; ZVFHMIN-NEXT: mv a0, a1
; ZVFHMIN-NEXT: .LBB25_2:
-; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vmv1r.v v0, v7
+; ZVFHMIN-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; ZVFHMIN-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; ZVFHMIN-NEXT: vsetvli a0, zero, e16, m4, ta, ma
; ZVFHMIN-NEXT: vfncvt.f.f.w v16, v8
@@ -460,8 +474,8 @@ define <vscale x 32 x float> @vuitofp_nxv32f32_nxv32i32(<vscale x 32 x i32> %va,
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB26_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfcvt.f.xu.v v8, v8, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x float> @llvm.vp.uitofp.nxv32f32.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
index c5f34eee3118..a869b433a495 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert.ll
@@ -85,9 +85,9 @@ define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vsc
; RV32-NEXT: vaadd.vv v8, v8, v9
; RV32-NEXT: call foo
; RV32-NEXT: csrwi vxrm, 0
-; RV32-NEXT: vsetvli zero, s0, e8, mf8, ta, ma
; RV32-NEXT: addi a0, sp, 16
; RV32-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsetvli zero, s0, e8, mf8, ta, ma
; RV32-NEXT: vaadd.vv v8, v8, v9
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
@@ -113,9 +113,9 @@ define <vscale x 1 x i8> @test3(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vsc
; RV64-NEXT: vaadd.vv v8, v8, v9
; RV64-NEXT: call foo
; RV64-NEXT: csrwi vxrm, 0
-; RV64-NEXT: vsetvli zero, s0, e8, mf8, ta, ma
; RV64-NEXT: addi a0, sp, 16
; RV64-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
+; RV64-NEXT: vsetvli zero, s0, e8, mf8, ta, ma
; RV64-NEXT: vaadd.vv v8, v8, v9
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 1
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
index 400f89b1ef77..3b5541c1a244 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzext-vp.ll
@@ -167,8 +167,8 @@ define <vscale x 32 x i32> @vzext_nxv32i8_nxv32i32(<vscale x 32 x i8> %a, <vscal
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB12_2:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vzext.vf4 v24, v8, v0.t
; CHECK-NEXT: vmv.v.v v8, v24
; CHECK-NEXT: ret
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 9ecfa5017831..56c1ad3527aa 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -643,9 +643,9 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: mv a1, s4
; RV32MV-NEXT: li a3, 0
; RV32MV-NEXT: call __moddi3
-; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: addi a2, sp, 16
; RV32MV-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: vslide1down.vx v8, v8, a0
; RV32MV-NEXT: vslide1down.vx v8, v8, a1
; RV32MV-NEXT: addi a0, sp, 16
@@ -655,9 +655,9 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV32MV-NEXT: mv a0, s1
; RV32MV-NEXT: mv a1, s3
; RV32MV-NEXT: call __moddi3
-; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: addi a2, sp, 16
; RV32MV-NEXT: vl2r.v v8, (a2) # Unknown-size Folded Reload
+; RV32MV-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32MV-NEXT: vslide1down.vx v8, v8, a0
; RV32MV-NEXT: vslide1down.vx v8, v8, a1
; RV32MV-NEXT: vslidedown.vi v8, v8, 2
@@ -779,7 +779,6 @@ define void @test_srem_vec(ptr %X) nounwind {
; RV64MV-NEXT: vmsne.vv v0, v8, v12
; RV64MV-NEXT: vmv.v.i v8, 0
; RV64MV-NEXT: vmerge.vim v8, v8, -1, v0
-; RV64MV-NEXT: vsetivli zero, 1, e64, m2, ta, ma
; RV64MV-NEXT: vslidedown.vi v10, v8, 2
; RV64MV-NEXT: vmv.x.s a2, v10
; RV64MV-NEXT: slli a3, a2, 31
diff --git a/llvm/test/CodeGen/SPIRV/execution-mode-reqd_work_group_size.ll b/llvm/test/CodeGen/SPIRV/execution-mode-reqd_work_group_size.ll
new file mode 100644
index 000000000000..6e36b0bd5b9d
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/execution-mode-reqd_work_group_size.ll
@@ -0,0 +1,35 @@
+; From Khronos Translator's test case: test/reqd_work_group_size_md.ll
+
+; The purpose of this test is to check that the reqd_work_group_size metadata
+; is correctly converted to the LocalSize execution mode for the kernels it is
+; applied to.
+
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpMemoryModel
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY1:]] "test1"
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY2:]] "test2"
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY3:]] "test3"
+; CHECK-DAG: OpExecutionMode %[[#ENTRY1]] LocalSize 1 2 3
+; CHECK-DAG: OpExecutionMode %[[#ENTRY2]] LocalSize 2 3 1
+; CHECK-DAG: OpExecutionMode %[[#ENTRY3]] LocalSize 3 1 1
+
+define spir_kernel void @test1() !reqd_work_group_size !1 {
+entry:
+ ret void
+}
+
+define spir_kernel void @test2() !reqd_work_group_size !2 {
+entry:
+ ret void
+}
+
+define spir_kernel void @test3() !reqd_work_group_size !3 {
+entry:
+ ret void
+}
+
+!1 = !{i32 1, i32 2, i32 3}
+!2 = !{i32 2, i32 3}
+!3 = !{i32 3}
diff --git a/llvm/test/CodeGen/SPIRV/execution-mode-work_group_size_hint.ll b/llvm/test/CodeGen/SPIRV/execution-mode-work_group_size_hint.ll
new file mode 100644
index 000000000000..f2c43d3748af
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/execution-mode-work_group_size_hint.ll
@@ -0,0 +1,34 @@
+; From Khronos Translator's test case: test/reqd_work_group_size_md.ll
+
+; The purpose of this test is to check that the work_group_size_hint metadata
+; is correctly converted to the LocalSizeHint execution mode.
+
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK: OpMemoryModel
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY1:]] "test1"
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY2:]] "test2"
+; CHECK-DAG: OpEntryPoint Kernel %[[#ENTRY3:]] "test3"
+; CHECK-DAG: OpExecutionMode %[[#ENTRY1]] LocalSizeHint 1 2 3
+; CHECK-DAG: OpExecutionMode %[[#ENTRY2]] LocalSizeHint 2 3 1
+; CHECK-DAG: OpExecutionMode %[[#ENTRY3]] LocalSizeHint 3 1 1
+
+define spir_kernel void @test1() !work_group_size_hint !1 {
+entry:
+ ret void
+}
+
+define spir_kernel void @test2() !work_group_size_hint !2 {
+entry:
+ ret void
+}
+
+define spir_kernel void @test3() !work_group_size_hint !3 {
+entry:
+ ret void
+}
+
+!1 = !{i32 1, i32 2, i32 3}
+!2 = !{i32 2, i32 3}
+!3 = !{i32 3}
diff --git a/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll b/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll
new file mode 100644
index 000000000000..2cd321b05a40
--- /dev/null
+++ b/llvm/test/CodeGen/SPIRV/phi-ptrcast-dominate.ll
@@ -0,0 +1,94 @@
+; The goal of this test is to check that the newly inserted `ptrcast` internal
+; intrinsic functions for PHI operands are placed at the correct positions,
+; and that they do not break the rules of instruction dominance or the
+; grouping of PHI nodes at the top of a basic block.
+
+; RUN: llc -O0 -mtriple=spirv64-unknown-unknown %s -o - | FileCheck %s
+; RUN: %if spirv-tools %{ llc -O0 -mtriple=spirv64-unknown-unknown %s -o - -filetype=obj | spirv-val %}
+
+; CHECK-DAG: OpName %[[#Case1:]] "case1"
+; CHECK-DAG: OpName %[[#Case2:]] "case2"
+; CHECK-DAG: OpName %[[#Case3:]] "case3"
+; CHECK: %[[#Case1]] = OpFunction
+; CHECK: OpBranchConditional
+; CHECK: OpPhi
+; CHECK: OpBranch
+; CHECK-COUNT-2: OpBranchConditional
+; CHECK: OpFunctionEnd
+; CHECK: %[[#Case2]] = OpFunction
+; CHECK: OpBranchConditional
+; CHECK: OpPhi
+; CHECK: OpBranch
+; CHECK-COUNT-2: OpBranchConditional
+; CHECK: OpFunctionEnd
+; CHECK: %[[#Case3]] = OpFunction
+; CHECK: OpBranchConditional
+; CHECK: OpPhi
+; CHECK: OpBranch
+; CHECK: OpInBoundsPtrAccessChain
+; CHECK: OpBranchConditional
+; CHECK: OpInBoundsPtrAccessChain
+; CHECK: OpBranchConditional
+; CHECK: OpFunctionEnd
+
+%struct1 = type { i64 }
+%struct2 = type { i64, i64 }
+
+@.str.1 = private unnamed_addr addrspace(1) constant [3 x i8] c"OK\00", align 1
+@.str.2 = private unnamed_addr addrspace(1) constant [6 x i8] c"WRONG\00", align 1
+
+define spir_func void @case1(i1 %b1, i1 %b2, i1 %b3) {
+entry:
+ br i1 %b1, label %l1, label %l2
+
+l1:
+ %str = phi ptr addrspace(1) [ @.str.1, %entry ], [ @.str.2, %l2 ], [ @.str.2, %l3 ]
+ br label %exit
+
+l2:
+ br i1 %b2, label %l1, label %l3
+
+l3:
+ br i1 %b3, label %l1, label %exit
+
+exit:
+ ret void
+}
+
+define spir_func void @case2(i1 %b1, i1 %b2, i1 %b3, ptr addrspace(1) byval(%struct1) %str1, ptr addrspace(1) byval(%struct2) %str2) {
+entry:
+ br i1 %b1, label %l1, label %l2
+
+l1:
+ %str = phi ptr addrspace(1) [ %str1, %entry ], [ %str2, %l2 ], [ %str2, %l3 ]
+ br label %exit
+
+l2:
+ br i1 %b2, label %l1, label %l3
+
+l3:
+ br i1 %b3, label %l1, label %exit
+
+exit:
+ ret void
+}
+
+define spir_func void @case3(i1 %b1, i1 %b2, i1 %b3, ptr addrspace(1) byval(%struct1) %_arg_str1, ptr addrspace(1) byval(%struct2) %_arg_str2) {
+entry:
+ br i1 %b1, label %l1, label %l2
+
+l1:
+ %str = phi ptr addrspace(1) [ %_arg_str1, %entry ], [ %str2, %l2 ], [ %str3, %l3 ]
+ br label %exit
+
+l2:
+ %str2 = getelementptr inbounds %struct2, ptr addrspace(1) %_arg_str2, i32 1
+ br i1 %b2, label %l1, label %l3
+
+l3:
+ %str3 = getelementptr inbounds %struct2, ptr addrspace(1) %_arg_str2, i32 2
+ br i1 %b3, label %l1, label %exit
+
+exit:
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/coalescer-add-implicit-def-subreg-to-reg-regression.ll b/llvm/test/CodeGen/X86/coalescer-add-implicit-def-subreg-to-reg-regression.ll
new file mode 100644
index 000000000000..0e6cb7a3aff2
--- /dev/null
+++ b/llvm/test/CodeGen/X86/coalescer-add-implicit-def-subreg-to-reg-regression.ll
@@ -0,0 +1,45 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=x86_64-unknown-linux-gnu < %s | FileCheck %s
+
+; Not from issue 76416, but a separate testcase reported on the same
+; regressing commit.
+define void @other_regression(i1 %cmp.not.i.i.i) {
+; CHECK-LABEL: other_regression:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: pushq %rax
+; CHECK-NEXT: .cfi_def_cfa_offset 16
+; CHECK-NEXT: movl 0, %eax
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: sarl %cl, %eax
+; CHECK-NEXT: movl $1, %edx
+; CHECK-NEXT: xorl %ecx, %ecx
+; CHECK-NEXT: shrl %cl, %edx
+; CHECK-NEXT: imull %eax, %edx
+; CHECK-NEXT: movslq %edx, %rsi
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: xorl %edi, %edi
+; CHECK-NEXT: xorl %edx, %edx
+; CHECK-NEXT: callq *%rax
+entry:
+ br label %for.cond10.preheader
+
+trap: ; preds = %for.body13
+ unreachable
+
+for.cond10.preheader: ; preds = %while.cond.i.i.i, %entry
+ %indvars.iv = phi i64 [ 0, %entry ], [ 1, %while.cond.i.i.i ]
+ %i = trunc i64 %indvars.iv to i32
+ br label %for.body13
+
+for.body13: ; preds = %for.cond10.preheader
+ %i1 = load i32, ptr null, align 4
+ %shr = ashr i32 %i1, %i
+ %shr15 = ashr i32 1, %i
+ %mul16 = mul i32 %shr15, %shr
+ %conv = sext i32 %mul16 to i64
+ call void null(ptr null, i64 %conv, ptr null)
+ br i1 false, label %while.cond.i.i.i, label %trap
+
+while.cond.i.i.i: ; preds = %while.cond.i.i.i, %for.body13
+ br i1 %cmp.not.i.i.i, label %for.cond10.preheader, label %while.cond.i.i.i
+}
diff --git a/llvm/test/CodeGen/X86/exp10-libcall-names.ll b/llvm/test/CodeGen/X86/exp10-libcall-names.ll
new file mode 100644
index 000000000000..ce26a0e738e9
--- /dev/null
+++ b/llvm/test/CodeGen/X86/exp10-libcall-names.ll
@@ -0,0 +1,40 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=x86_64-linux-gnu < %s | FileCheck -check-prefix=LINUX %s
+; RUN: llc -mtriple=x86_64-apple-macos10.9 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=x86_64-apple-ios9.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=x86_64-apple-tvos9.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=x86_64-apple-watchos9.0 < %s | FileCheck -check-prefix=APPLE %s
+; RUN: llc -mtriple=x86_64-apple-xros9.0 < %s | FileCheck -check-prefix=APPLE %s
+
+; RUN: not llc -mtriple=x86_64-apple-macos10.8 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=x86_64-apple-ios8.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=x86_64-apple-tvos8.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+; RUN: not llc -mtriple=x86_64-apple-xros8.0 -filetype=null %s 2>&1 | FileCheck -check-prefix=ERR %s
+
+; Check that exp10/exp10f are emitted as __exp10/__exp10f on assorted systems.
+
+; ERR: no libcall available for fexp10
+
+define float @test_exp10_f32(float %x) {
+; LINUX-LABEL: test_exp10_f32:
+; LINUX: # %bb.0:
+; LINUX-NEXT: jmp exp10f@PLT # TAILCALL
+;
+; APPLE-LABEL: test_exp10_f32:
+; APPLE: ## %bb.0:
+; APPLE-NEXT: jmp ___exp10f ## TAILCALL
+ %ret = call float @llvm.exp10.f32(float %x)
+ ret float %ret
+}
+
+define double @test_exp10_f64(double %x) {
+; LINUX-LABEL: test_exp10_f64:
+; LINUX: # %bb.0:
+; LINUX-NEXT: jmp exp10@PLT # TAILCALL
+;
+; APPLE-LABEL: test_exp10_f64:
+; APPLE: ## %bb.0:
+; APPLE-NEXT: jmp ___exp10 ## TAILCALL
+ %ret = call double @llvm.exp10.f64(double %x)
+ ret double %ret
+}
diff --git a/llvm/test/CodeGen/X86/fat-lto-section.ll b/llvm/test/CodeGen/X86/fat-lto-section.ll
index 30c56229a0e2..f3ca8436affb 100644
--- a/llvm/test/CodeGen/X86/fat-lto-section.ll
+++ b/llvm/test/CodeGen/X86/fat-lto-section.ll
@@ -5,6 +5,6 @@
; RUN: | FileCheck %s --check-prefix=EXCLUDE
; EXCLUDE: Name Type {{.*}} ES Flg Lk Inf Al
-; EXCLUDE: .llvm.lto PROGBITS {{.*}} 00 E 0 0 1
+; EXCLUDE: .llvm.lto LLVM_LTO {{.*}} 00 E 0 0 1
@a = global i32 1
diff --git a/llvm/test/CodeGen/X86/issue76416.ll b/llvm/test/CodeGen/X86/issue76416.ll
new file mode 100644
index 000000000000..d0f7fe684a84
--- /dev/null
+++ b/llvm/test/CodeGen/X86/issue76416.ll
@@ -0,0 +1,78 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc -mtriple=x86_64-unknown-freebsd15.0 < %s | FileCheck %s
+
+%struct.anon.5.28.78.99.149.119 = type { [4 x i8] }
+
+@vga_load_state_p = external dso_local global ptr, align 8
+@vga_load_state_data = external dso_local global i8, align 1
+
+define dso_local void @vga_load_state() #0 {
+; CHECK-LABEL: vga_load_state:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jg .LBB0_3
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_2: # %for.body
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: xorl %eax, %eax
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: incl -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: cmpl $3, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jle .LBB0_2
+; CHECK-NEXT: .LBB0_3: # %for.end
+; CHECK-NEXT: movl $0, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: .p2align 4, 0x90
+; CHECK-NEXT: .LBB0_4: # %for.cond1
+; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: #APP
+; CHECK-NEXT: #NO_APP
+; CHECK-NEXT: movq vga_load_state_p(%rip), %rax
+; CHECK-NEXT: movslq -{{[0-9]+}}(%rsp), %rcx
+; CHECK-NEXT: movzbl (%rax,%rcx), %eax
+; CHECK-NEXT: movb %al, vga_load_state_data(%rip)
+; CHECK-NEXT: leal 1(%rcx), %eax
+; CHECK-NEXT: movl %eax, -{{[0-9]+}}(%rsp)
+; CHECK-NEXT: jmp .LBB0_4
+entry:
+ %i = alloca i32, align 4
+ store i32 0, ptr %i, align 4
+ br label %for.cond
+
+for.cond: ; preds = %for.body, %entry
+ %i1 = load i32, ptr %i, align 4
+ %cmp = icmp slt i32 %i1, 4
+ br i1 %cmp, label %for.body, label %for.end
+
+for.body: ; preds = %for.cond
+ call void asm sideeffect "", "{ax},~{dirflag},~{fpsr},~{flags}"(i8 0) #1
+ %i2 = load i32, ptr %i, align 4
+ %inc = add nsw i32 %i2, 1
+ store i32 %inc, ptr %i, align 4
+ br label %for.cond
+
+for.end: ; preds = %for.cond
+ store i32 0, ptr %i, align 4
+ br label %for.cond1
+
+for.cond1: ; preds = %for.cond1, %for.end
+ call void asm sideeffect "", "N{dx},~{dirflag},~{fpsr},~{flags}"(i32 poison) #1
+ %i3 = load ptr, ptr @vga_load_state_p, align 8
+ %regs = getelementptr inbounds %struct.anon.5.28.78.99.149.119, ptr %i3, i32 0, i32 0
+ %i4 = load i32, ptr %i, align 4
+ %idxprom = sext i32 %i4 to i64
+ %arrayidx = getelementptr inbounds [4 x i8], ptr %regs, i64 0, i64 %idxprom
+ %i5 = load i8, ptr %arrayidx, align 1
+ store i8 %i5, ptr @vga_load_state_data, align 1
+ %i6 = load i32, ptr %i, align 4
+ %inc5 = add nsw i32 %i6, 1
+ store i32 %inc5, ptr %i, align 4
+ br label %for.cond1, !llvm.loop !0
+}
+
+attributes #0 = { "tune-cpu"="generic" }
+attributes #1 = { nounwind }
+
+!0 = distinct !{!0, !1}
+!1 = !{!"llvm.loop.mustprogress"}
diff --git a/llvm/test/CodeGen/X86/misched-critical-path.ll b/llvm/test/CodeGen/X86/misched-critical-path.ll
new file mode 100644
index 000000000000..909692aca2b0
--- /dev/null
+++ b/llvm/test/CodeGen/X86/misched-critical-path.ll
@@ -0,0 +1,240 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin8 -misched-print-dags -o - 2>&1 > /dev/null | FileCheck %s
+
+target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128"
+
+@sc = common global i8 0
+@uc = common global i8 0
+@ss = common global i16 0
+@us = common global i16 0
+@si = common global i32 0
+@ui = common global i32 0
+@sl = common global i64 0
+@ul = common global i64 0
+@sll = common global i64 0
+@ull = common global i64 0
+
+; Regression test for PR92368.
+;
+; CHECK: SU(75): CMP8rr %49:gr8, %48:gr8, implicit-def $eflags
+; CHECK: Predecessors:
+; CHECK-NEXT: SU(73): Data Latency=0 Reg=%49
+; CHECK-NEXT: SU(74): Out Latency=0
+; CHECK-NEXT: SU(72): Out Latency=0
+; CHECK-NEXT: SU(70): Data Latency=4 Reg=%48
+define void @misched_bug() nounwind {
+entry:
+ %0 = load i8, i8* @sc, align 1
+ %1 = zext i8 %0 to i32
+ %2 = load i8, i8* @uc, align 1
+ %3 = zext i8 %2 to i32
+ %4 = trunc i32 %3 to i8
+ %5 = trunc i32 %1 to i8
+ %pair6 = cmpxchg i8* @sc, i8 %4, i8 %5 monotonic monotonic
+ %6 = extractvalue { i8, i1 } %pair6, 0
+ store i8 %6, i8* @sc, align 1
+ %7 = load i8, i8* @sc, align 1
+ %8 = zext i8 %7 to i32
+ %9 = load i8, i8* @uc, align 1
+ %10 = zext i8 %9 to i32
+ %11 = trunc i32 %10 to i8
+ %12 = trunc i32 %8 to i8
+ %pair13 = cmpxchg i8* @uc, i8 %11, i8 %12 monotonic monotonic
+ %13 = extractvalue { i8, i1 } %pair13, 0
+ store i8 %13, i8* @uc, align 1
+ %14 = load i8, i8* @sc, align 1
+ %15 = sext i8 %14 to i16
+ %16 = zext i16 %15 to i32
+ %17 = load i8, i8* @uc, align 1
+ %18 = zext i8 %17 to i32
+ %19 = bitcast i8* bitcast (i16* @ss to i8*) to i16*
+ %20 = trunc i32 %18 to i16
+ %21 = trunc i32 %16 to i16
+ %pair22 = cmpxchg i16* %19, i16 %20, i16 %21 monotonic monotonic
+ %22 = extractvalue { i16, i1 } %pair22, 0
+ store i16 %22, i16* @ss, align 2
+ %23 = load i8, i8* @sc, align 1
+ %24 = sext i8 %23 to i16
+ %25 = zext i16 %24 to i32
+ %26 = load i8, i8* @uc, align 1
+ %27 = zext i8 %26 to i32
+ %28 = bitcast i8* bitcast (i16* @us to i8*) to i16*
+ %29 = trunc i32 %27 to i16
+ %30 = trunc i32 %25 to i16
+ %pair31 = cmpxchg i16* %28, i16 %29, i16 %30 monotonic monotonic
+ %31 = extractvalue { i16, i1 } %pair31, 0
+ store i16 %31, i16* @us, align 2
+ %32 = load i8, i8* @sc, align 1
+ %33 = sext i8 %32 to i32
+ %34 = load i8, i8* @uc, align 1
+ %35 = zext i8 %34 to i32
+ %36 = bitcast i8* bitcast (i32* @si to i8*) to i32*
+ %pair37 = cmpxchg i32* %36, i32 %35, i32 %33 monotonic monotonic
+ %37 = extractvalue { i32, i1 } %pair37, 0
+ store i32 %37, i32* @si, align 4
+ %38 = load i8, i8* @sc, align 1
+ %39 = sext i8 %38 to i32
+ %40 = load i8, i8* @uc, align 1
+ %41 = zext i8 %40 to i32
+ %42 = bitcast i8* bitcast (i32* @ui to i8*) to i32*
+ %pair43 = cmpxchg i32* %42, i32 %41, i32 %39 monotonic monotonic
+ %43 = extractvalue { i32, i1 } %pair43, 0
+ store i32 %43, i32* @ui, align 4
+ %44 = load i8, i8* @sc, align 1
+ %45 = sext i8 %44 to i64
+ %46 = load i8, i8* @uc, align 1
+ %47 = zext i8 %46 to i64
+ %48 = bitcast i8* bitcast (i64* @sl to i8*) to i64*
+ %pair49 = cmpxchg i64* %48, i64 %47, i64 %45 monotonic monotonic
+ %49 = extractvalue { i64, i1 } %pair49, 0
+ store i64 %49, i64* @sl, align 8
+ %50 = load i8, i8* @sc, align 1
+ %51 = sext i8 %50 to i64
+ %52 = load i8, i8* @uc, align 1
+ %53 = zext i8 %52 to i64
+ %54 = bitcast i8* bitcast (i64* @ul to i8*) to i64*
+ %pair55 = cmpxchg i64* %54, i64 %53, i64 %51 monotonic monotonic
+ %55 = extractvalue { i64, i1 } %pair55, 0
+ store i64 %55, i64* @ul, align 8
+ %56 = load i8, i8* @sc, align 1
+ %57 = sext i8 %56 to i64
+ %58 = load i8, i8* @uc, align 1
+ %59 = zext i8 %58 to i64
+ %60 = bitcast i8* bitcast (i64* @sll to i8*) to i64*
+ %pair61 = cmpxchg i64* %60, i64 %59, i64 %57 monotonic monotonic
+ %61 = extractvalue { i64, i1 } %pair61, 0
+ store i64 %61, i64* @sll, align 8
+ %62 = load i8, i8* @sc, align 1
+ %63 = sext i8 %62 to i64
+ %64 = load i8, i8* @uc, align 1
+ %65 = zext i8 %64 to i64
+ %66 = bitcast i8* bitcast (i64* @ull to i8*) to i64*
+ %pair67 = cmpxchg i64* %66, i64 %65, i64 %63 monotonic monotonic
+ %67 = extractvalue { i64, i1 } %pair67, 0
+ store i64 %67, i64* @ull, align 8
+ %68 = load i8, i8* @sc, align 1
+ %69 = zext i8 %68 to i32
+ %70 = load i8, i8* @uc, align 1
+ %71 = zext i8 %70 to i32
+ %72 = trunc i32 %71 to i8
+ %73 = trunc i32 %69 to i8
+ %pair74 = cmpxchg i8* @sc, i8 %72, i8 %73 monotonic monotonic
+ %74 = extractvalue { i8, i1 } %pair74, 0
+ %75 = icmp eq i8 %74, %72
+ %76 = zext i1 %75 to i8
+ %77 = zext i8 %76 to i32
+ store i32 %77, i32* @ui, align 4
+ %78 = load i8, i8* @sc, align 1
+ %79 = zext i8 %78 to i32
+ %80 = load i8, i8* @uc, align 1
+ %81 = zext i8 %80 to i32
+ %82 = trunc i32 %81 to i8
+ %83 = trunc i32 %79 to i8
+ %pair84 = cmpxchg i8* @uc, i8 %82, i8 %83 monotonic monotonic
+ %84 = extractvalue { i8, i1 } %pair84, 0
+ %85 = icmp eq i8 %84, %82
+ %86 = zext i1 %85 to i8
+ %87 = zext i8 %86 to i32
+ store i32 %87, i32* @ui, align 4
+ %88 = load i8, i8* @sc, align 1
+ %89 = sext i8 %88 to i16
+ %90 = zext i16 %89 to i32
+ %91 = load i8, i8* @uc, align 1
+ %92 = zext i8 %91 to i32
+ %93 = trunc i32 %92 to i8
+ %94 = trunc i32 %90 to i8
+ %pair95 = cmpxchg i8* bitcast (i16* @ss to i8*), i8 %93, i8 %94 monotonic monotonic
+ %95 = extractvalue { i8, i1 } %pair95, 0
+ %96 = icmp eq i8 %95, %93
+ %97 = zext i1 %96 to i8
+ %98 = zext i8 %97 to i32
+ store i32 %98, i32* @ui, align 4
+ %99 = load i8, i8* @sc, align 1
+ %100 = sext i8 %99 to i16
+ %101 = zext i16 %100 to i32
+ %102 = load i8, i8* @uc, align 1
+ %103 = zext i8 %102 to i32
+ %104 = trunc i32 %103 to i8
+ %105 = trunc i32 %101 to i8
+ %pair106 = cmpxchg i8* bitcast (i16* @us to i8*), i8 %104, i8 %105 monotonic monotonic
+ %106 = extractvalue { i8, i1 } %pair106, 0
+ %107 = icmp eq i8 %106, %104
+ %108 = zext i1 %107 to i8
+ %109 = zext i8 %108 to i32
+ store i32 %109, i32* @ui, align 4
+ %110 = load i8, i8* @sc, align 1
+ %111 = sext i8 %110 to i32
+ %112 = load i8, i8* @uc, align 1
+ %113 = zext i8 %112 to i32
+ %114 = trunc i32 %113 to i8
+ %115 = trunc i32 %111 to i8
+ %pair116 = cmpxchg i8* bitcast (i32* @si to i8*), i8 %114, i8 %115 monotonic monotonic
+ %116 = extractvalue { i8, i1 } %pair116, 0
+ %117 = icmp eq i8 %116, %114
+ %118 = zext i1 %117 to i8
+ %119 = zext i8 %118 to i32
+ store i32 %119, i32* @ui, align 4
+ %120 = load i8, i8* @sc, align 1
+ %121 = sext i8 %120 to i32
+ %122 = load i8, i8* @uc, align 1
+ %123 = zext i8 %122 to i32
+ %124 = trunc i32 %123 to i8
+ %125 = trunc i32 %121 to i8
+ %pair126 = cmpxchg i8* bitcast (i32* @ui to i8*), i8 %124, i8 %125 monotonic monotonic
+ %126 = extractvalue { i8, i1 } %pair126, 0
+ %127 = icmp eq i8 %126, %124
+ %128 = zext i1 %127 to i8
+ %129 = zext i8 %128 to i32
+ store i32 %129, i32* @ui, align 4
+ %130 = load i8, i8* @sc, align 1
+ %131 = sext i8 %130 to i64
+ %132 = load i8, i8* @uc, align 1
+ %133 = zext i8 %132 to i64
+ %134 = trunc i64 %133 to i8
+ %135 = trunc i64 %131 to i8
+ %pair136 = cmpxchg i8* bitcast (i64* @sl to i8*), i8 %134, i8 %135 monotonic monotonic
+ %136 = extractvalue { i8, i1 } %pair136, 0
+ %137 = icmp eq i8 %136, %134
+ %138 = zext i1 %137 to i8
+ %139 = zext i8 %138 to i32
+ store i32 %139, i32* @ui, align 4
+ %140 = load i8, i8* @sc, align 1
+ %141 = sext i8 %140 to i64
+ %142 = load i8, i8* @uc, align 1
+ %143 = zext i8 %142 to i64
+ %144 = trunc i64 %143 to i8
+ %145 = trunc i64 %141 to i8
+ %pair146 = cmpxchg i8* bitcast (i64* @ul to i8*), i8 %144, i8 %145 monotonic monotonic
+ %146 = extractvalue { i8, i1 } %pair146, 0
+ %147 = icmp eq i8 %146, %144
+ %148 = zext i1 %147 to i8
+ %149 = zext i8 %148 to i32
+ store i32 %149, i32* @ui, align 4
+ %150 = load i8, i8* @sc, align 1
+ %151 = sext i8 %150 to i64
+ %152 = load i8, i8* @uc, align 1
+ %153 = zext i8 %152 to i64
+ %154 = trunc i64 %153 to i8
+ %155 = trunc i64 %151 to i8
+ %pair156 = cmpxchg i8* bitcast (i64* @sll to i8*), i8 %154, i8 %155 monotonic monotonic
+ %156 = extractvalue { i8, i1 } %pair156, 0
+ %157 = icmp eq i8 %156, %154
+ %158 = zext i1 %157 to i8
+ %159 = zext i8 %158 to i32
+ store i32 %159, i32* @ui, align 4
+ %160 = load i8, i8* @sc, align 1
+ %161 = sext i8 %160 to i64
+ %162 = load i8, i8* @uc, align 1
+ %163 = zext i8 %162 to i64
+ %164 = trunc i64 %163 to i8
+ %165 = trunc i64 %161 to i8
+ %pair166 = cmpxchg i8* bitcast (i64* @ull to i8*), i8 %164, i8 %165 monotonic monotonic
+ %166 = extractvalue { i8, i1 } %pair166, 0
+ %167 = icmp eq i8 %166, %164
+ %168 = zext i1 %167 to i8
+ %169 = zext i8 %168 to i32
+ store i32 %169, i32* @ui, align 4
+ br label %return
+
+return: ; preds = %entry
+ ret void
+}
diff --git a/llvm/test/CodeGen/X86/pr90703.ll b/llvm/test/CodeGen/X86/pr90703.ll
new file mode 100644
index 000000000000..c02342ffeec1
--- /dev/null
+++ b/llvm/test/CodeGen/X86/pr90703.ll
@@ -0,0 +1,21 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 5
+; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+bmi | FileCheck %s
+
+define i64 @pr90730(i64 %x, i64 %y, ptr %p) {
+; CHECK-LABEL: pr90730:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: movabsq $33181731808, %rax # imm = 0x7B9C90BE0
+; CHECK-NEXT: andnq %rax, %rdi, %rax
+; CHECK-NEXT: movq $0, (%rdx)
+; CHECK-NEXT: retq
+entry:
+ %ext = and i64 %y, 1
+ %xor1 = xor i64 %ext, 33181731817
+ %and1 = and i64 %xor1, %x
+ store i64 %and1, ptr %p, align 4
+ %v = load i64, ptr %p, align 4
+ %and2 = and i64 %v, 33181731808
+ %xor2 = xor i64 %and2, 33181731808
+ store i64 0, ptr %p, align 4
+ ret i64 %xor2
+}
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/mem-attr.ll b/llvm/test/Instrumentation/HWAddressSanitizer/mem-attr.ll
new file mode 100644
index 000000000000..c0e370f20213
--- /dev/null
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/mem-attr.ll
@@ -0,0 +1,15 @@
+; Test that HWASan removes writeonly and memory(*) attributes from instrumented functions.
+; RUN: opt -S -passes=hwasan %s | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128-Fn32"
+target triple = "aarch64-unknown-linux-android30"
+
+; CHECK: define dso_local void @test_writeonly(ptr nocapture noundef %p) local_unnamed_addr #0
+define dso_local void @test_writeonly(ptr nocapture noundef writeonly %p) local_unnamed_addr #0 {
+entry:
+ store i32 42, ptr %p, align 4
+ ret void
+}
+
+; CHECK: attributes #0 = { sanitize_hwaddress uwtable }
+attributes #0 = { sanitize_hwaddress memory(argmem: write) uwtable }
diff --git a/llvm/test/Linker/darwin-target-variant.ll b/llvm/test/Linker/darwin-target-variant.ll
new file mode 100644
index 000000000000..7d46b2dda4a9
--- /dev/null
+++ b/llvm/test/Linker/darwin-target-variant.ll
@@ -0,0 +1,42 @@
+; RUN: rm -rf %t && split-file %s %t
+; RUN: llvm-link %t/1.ll %t/2.ll -S -o - | FileCheck %s
+; CHECK: {i32 2, !"darwin.target_variant.triple", !"x86_64-apple-ios13.1-macabi"}
+
+; RUN: llvm-link %t/1.ll %t/old.ll -S -o - | FileCheck %s -check-prefix OLD
+; OLD: {i32 4, !"darwin.target_variant.triple", !"x86_64-apple-ios14.0-macabi"}
+
+;--- 1.ll
+target triple = "x86_64-apple-macos10.15";
+!llvm.module.flags = !{!0, !1, !2};
+!0 = !{i32 2, !"SDK Version", [3 x i32] [ i32 10, i32 15, i32 1 ] };
+!1 = !{i32 2, !"darwin.target_variant.triple", !"x86_64-apple-ios13.1-macabi"};
+!2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [ i32 13, i32 2 ] };
+
+define void @foo() {
+entry:
+ ret void
+}
+
+;--- 2.ll
+target triple = "x86_64-apple-macos10.15";
+!llvm.module.flags = !{!0, !1, !2};
+!0 = !{i32 2, !"SDK Version", [3 x i32] [ i32 10, i32 15, i32 1 ] };
+!1 = !{i32 2, !"darwin.target_variant.triple", !"x86_64-apple-ios14.0-macabi"};
+!2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [ i32 13, i32 2 ] };
+
+define void @bar() {
+entry:
+ ret void
+}
+
+;--- old.ll
+target triple = "x86_64-apple-macos10.15";
+!llvm.module.flags = !{!0, !1, !2};
+!0 = !{i32 2, !"SDK Version", [3 x i32] [ i32 10, i32 15, i32 1 ] };
+!1 = !{i32 4, !"darwin.target_variant.triple", !"x86_64-apple-ios14.0-macabi"};
+!2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [ i32 13, i32 2 ] };
+
+define void @old() {
+entry:
+ ret void
+}
diff --git a/llvm/test/MC/MachO/darwin-target-variant-reverse.ll b/llvm/test/MC/MachO/darwin-target-variant-reverse.ll
index 6d51cd8fffa8..fd527b204546 100644
--- a/llvm/test/MC/MachO/darwin-target-variant-reverse.ll
+++ b/llvm/test/MC/MachO/darwin-target-variant-reverse.ll
@@ -3,7 +3,7 @@
target triple = "x86_64-apple-ios13.1-macabi";
!llvm.module.flags = !{!0, !1, !2};
!0 = !{i32 2, !"SDK Version", [2 x i32] [ i32 13, i32 1 ] };
-!1 = !{i32 1, !"darwin.target_variant.triple", !"x86_64-apple-macos10.15"};
+!1 = !{i32 2, !"darwin.target_variant.triple", !"x86_64-apple-macos10.15"};
!2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [ i32 10, i32 15 ] };
define void @foo() {
diff --git a/llvm/test/MC/MachO/darwin-target-variant.ll b/llvm/test/MC/MachO/darwin-target-variant.ll
index d506ed92c9cc..78bd1e98410f 100644
--- a/llvm/test/MC/MachO/darwin-target-variant.ll
+++ b/llvm/test/MC/MachO/darwin-target-variant.ll
@@ -4,7 +4,7 @@
target triple = "x86_64-apple-macos10.15";
!llvm.module.flags = !{!0, !1, !2};
!0 = !{i32 2, !"SDK Version", [3 x i32] [ i32 10, i32 15, i32 1 ] };
-!1 = !{i32 1, !"darwin.target_variant.triple", !"x86_64-apple-ios13.1-macabi"};
+!1 = !{i32 2, !"darwin.target_variant.triple", !"x86_64-apple-ios13.1-macabi"};
!2 = !{i32 2, !"darwin.target_variant.SDK Version", [2 x i32] [ i32 13, i32 2 ] };
define void @foo() {
diff --git a/llvm/test/MC/RISCV/rv32zaamo-invalid.s b/llvm/test/MC/RISCV/rv32zaamo-invalid.s
index fb4dab4542d6..984a0d61e2d0 100644
--- a/llvm/test/MC/RISCV/rv32zaamo-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zaamo-invalid.s
@@ -1,5 +1,5 @@
# RUN: not llvm-mc -triple riscv32 -mattr=+a < %s 2>&1 | FileCheck %s
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zaamo < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv32 -mattr=+zaamo < %s 2>&1 | FileCheck %s
# Final operand must have parentheses
amoswap.w a1, a2, a3 # CHECK: :[[@LINE]]:19: error: expected '(' or optional integer offset
diff --git a/llvm/test/MC/RISCV/rv32zaamo-valid.s b/llvm/test/MC/RISCV/rv32zaamo-valid.s
index f6b5799b46f8..d9ba6ef0240b 100644
--- a/llvm/test/MC/RISCV/rv32zaamo-valid.s
+++ b/llvm/test/MC/RISCV/rv32zaamo-valid.s
@@ -8,15 +8,15 @@
# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+a < %s \
# RUN: | llvm-objdump --mattr=+a -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zaamo -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+zaamo -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zaamo -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zaamo -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zaamo < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zaamo -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+zaamo < %s \
+# RUN: | llvm-objdump --mattr=+zaamo -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zaamo < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zaamo -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zaamo < %s \
+# RUN: | llvm-objdump --mattr=+zaamo -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# CHECK-ASM-AND-OBJ: amoswap.w a4, ra, (s0)
diff --git a/llvm/test/MC/RISCV/rv32zalrsc-invalid.s b/llvm/test/MC/RISCV/rv32zalrsc-invalid.s
index 9233c978f033..b1eb982a9763 100644
--- a/llvm/test/MC/RISCV/rv32zalrsc-invalid.s
+++ b/llvm/test/MC/RISCV/rv32zalrsc-invalid.s
@@ -1,5 +1,5 @@
# RUN: not llvm-mc -triple riscv32 -mattr=+a < %s 2>&1 | FileCheck %s
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zalrsc < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv32 -mattr=+zalrsc < %s 2>&1 | FileCheck %s
# Final operand must have parentheses
lr.w a4, a5 # CHECK: :[[@LINE]]:10: error: expected '(' or optional integer offset
diff --git a/llvm/test/MC/RISCV/rv32zalrsc-valid.s b/llvm/test/MC/RISCV/rv32zalrsc-valid.s
index f59a4df0d667..f84c0fd62f69 100644
--- a/llvm/test/MC/RISCV/rv32zalrsc-valid.s
+++ b/llvm/test/MC/RISCV/rv32zalrsc-valid.s
@@ -8,15 +8,15 @@
# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+a < %s \
# RUN: | llvm-objdump --mattr=+a -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv32 -mattr=+experimental-zalrsc -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv32 -mattr=+zalrsc -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zalrsc -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zalrsc -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+experimental-zalrsc < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zalrsc -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv32 -mattr=+zalrsc < %s \
+# RUN: | llvm-objdump --mattr=+zalrsc -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zalrsc < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zalrsc -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zalrsc < %s \
+# RUN: | llvm-objdump --mattr=+zalrsc -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
# CHECK-ASM-AND-OBJ: lr.w t0, (t1)
diff --git a/llvm/test/MC/RISCV/rv64zaamo-invalid.s b/llvm/test/MC/RISCV/rv64zaamo-invalid.s
index e00c1ec7bed7..cb219a79bc7e 100644
--- a/llvm/test/MC/RISCV/rv64zaamo-invalid.s
+++ b/llvm/test/MC/RISCV/rv64zaamo-invalid.s
@@ -1,5 +1,5 @@
# RUN: not llvm-mc -triple riscv64 -mattr=+a < %s 2>&1 | FileCheck %s
-# RUN: not llvm-mc -triple riscv64 -mattr=+experimental-zaamo < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv64 -mattr=+zaamo < %s 2>&1 | FileCheck %s
# Final operand must have parentheses
amoswap.d a1, a2, a3 # CHECK: :[[@LINE]]:19: error: expected '(' or optional integer offset
diff --git a/llvm/test/MC/RISCV/rv64zaamo-valid.s b/llvm/test/MC/RISCV/rv64zaamo-valid.s
index 51493b97c875..96d3e619b4c1 100644
--- a/llvm/test/MC/RISCV/rv64zaamo-valid.s
+++ b/llvm/test/MC/RISCV/rv64zaamo-valid.s
@@ -7,13 +7,13 @@
# RUN: not llvm-mc -triple riscv32 -mattr=+a < %s 2>&1 \
# RUN: | FileCheck -check-prefix=CHECK-RV32 %s
#
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zaamo -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zaamo -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zaamo < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zaamo -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zaamo < %s \
+# RUN: | llvm-objdump --mattr=+zaamo -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
#
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zaamo < %s 2>&1 \
+# RUN: not llvm-mc -triple riscv32 -mattr=+zaamo < %s 2>&1 \
# RUN: | FileCheck -check-prefix=CHECK-RV32 %s
# CHECK-ASM-AND-OBJ: amoswap.d a4, ra, (s0)
diff --git a/llvm/test/MC/RISCV/rv64zalrsc-invalid.s b/llvm/test/MC/RISCV/rv64zalrsc-invalid.s
index e2ad2fc49139..4a9d55e752f0 100644
--- a/llvm/test/MC/RISCV/rv64zalrsc-invalid.s
+++ b/llvm/test/MC/RISCV/rv64zalrsc-invalid.s
@@ -1,5 +1,5 @@
# RUN: not llvm-mc -triple riscv64 -mattr=+a < %s 2>&1 | FileCheck %s
-# RUN: not llvm-mc -triple riscv64 -mattr=+experimental-zalrsc < %s 2>&1 | FileCheck %s
+# RUN: not llvm-mc -triple riscv64 -mattr=+zalrsc < %s 2>&1 | FileCheck %s
# Final operand must have parentheses
lr.d a4, a5 # CHECK: :[[@LINE]]:10: error: expected '(' or optional integer offset
diff --git a/llvm/test/MC/RISCV/rv64zalrsc-valid.s b/llvm/test/MC/RISCV/rv64zalrsc-valid.s
index 5f4437250d9d..2bbde96b6e07 100644
--- a/llvm/test/MC/RISCV/rv64zalrsc-valid.s
+++ b/llvm/test/MC/RISCV/rv64zalrsc-valid.s
@@ -7,13 +7,13 @@
# RUN: not llvm-mc -triple riscv32 -mattr=+a < %s 2>&1 \
# RUN: | FileCheck -check-prefix=CHECK-RV32 %s
#
-# RUN: llvm-mc %s -triple=riscv64 -mattr=+experimental-zalrsc -riscv-no-aliases -show-encoding \
+# RUN: llvm-mc %s -triple=riscv64 -mattr=+zalrsc -riscv-no-aliases -show-encoding \
# RUN: | FileCheck -check-prefixes=CHECK-ASM,CHECK-ASM-AND-OBJ %s
-# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+experimental-zalrsc < %s \
-# RUN: | llvm-objdump --mattr=+experimental-zalrsc -M no-aliases -d -r - \
+# RUN: llvm-mc -filetype=obj -triple=riscv64 -mattr=+zalrsc < %s \
+# RUN: | llvm-objdump --mattr=+zalrsc -M no-aliases -d -r - \
# RUN: | FileCheck --check-prefix=CHECK-ASM-AND-OBJ %s
#
-# RUN: not llvm-mc -triple riscv32 -mattr=+experimental-zalrsc < %s 2>&1 \
+# RUN: not llvm-mc -triple riscv32 -mattr=+zalrsc < %s 2>&1 \
# RUN: | FileCheck -check-prefix=CHECK-RV32 %s
# CHECK-ASM-AND-OBJ: lr.d t0, (t1)
diff --git a/llvm/test/TableGen/predicate-patfags.td b/llvm/test/TableGen/predicate-patfags.td
index 2cf29769dc13..39133f324f30 100644
--- a/llvm/test/TableGen/predicate-patfags.td
+++ b/llvm/test/TableGen/predicate-patfags.td
@@ -1,5 +1,7 @@
-// RUN: llvm-tblgen -gen-dag-isel -I %p/../../include -I %p/Common %s 2>&1 | FileCheck -check-prefix=SDAG %s
-// RUN: llvm-tblgen -gen-global-isel -I %p/../../include -I %p/Common %s 2>&1 | FileCheck -check-prefix=GISEL %s
+// RUN: llvm-tblgen -gen-dag-isel -I %p/../../include -I %p/Common %s 2>&1 | FileCheck -check-prefixes=SDAG,SCUSTOM %s
+// RUN: llvm-tblgen -gen-dag-isel -I %p/../../include -I %p/Common %s -DHASONEUSE 2>&1 | FileCheck -check-prefixes=SDAG,SBUILTIN %s
+// RUN: llvm-tblgen -gen-global-isel -I %p/../../include -I %p/Common %s 2>&1 | FileCheck -check-prefixes=GISEL,GCUSTOM %s
+// RUN: llvm-tblgen -gen-global-isel -I %p/../../include -I %p/Common %s -DHASONEUSE 2>&1 | FileCheck -check-prefixes=GISEL,GBUILTIN %s
include "llvm/Target/Target.td"
include "GlobalISelEmitterCommon.td"
@@ -31,11 +33,16 @@ def : GINodeEquiv<G_TGT_MUL24, TGTmul24_impl>;
def TGTmul24_oneuse : PatFrag<
(ops node:$src0, node:$src1),
- (TGTmul24 $src0, $src1),
- [{ return N->hasOneUse(); }]> {
+ (TGTmul24 $src0, $src1)
+#ifndef HASONEUSE
+ , [{ return N->hasOneUse(); }]> {
let GISelPredicateCode = [{
return MRI->hasOneNonDBGUse(MI.getOperand(0).getReg());
}];
+#else
+ > {
+ let HasOneUse = 1;
+#endif
}
// SDAG: OPC_CheckOpcode, TARGET_VAL(ISD::INTRINSIC_W_CHAIN),
@@ -44,19 +51,26 @@ def TGTmul24_oneuse : PatFrag<
// SDAG: OPC_CheckOpcode, TARGET_VAL(TargetISD::MUL24),
// SDAG: OPC_CheckPredicate0, // Predicate_TGTmul24_oneuse
+// SCUSTOM: return N->hasOneUse();
+// SBUILTIN: if (!SDValue(N, 0).hasOneUse()) return false;
+
// GISEL: GIM_CheckOpcode, /*MI*/1, GIMT_Encode2(TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS),
// GISEL: GIM_CheckIntrinsicID, /*MI*/1, /*Op*/1, GIMT_Encode2(Intrinsic::tgt_mul24),
-// GISEL: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
+// GBUILTIN: GIM_CheckHasOneUse, /*MI*/1,
+// GCUSTOM: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
// GISEL: GIM_CheckOpcode, /*MI*/1, GIMT_Encode2(TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS),
// GISEL: GIM_CheckIntrinsicID, /*MI*/1, /*Op*/1, GIMT_Encode2(Intrinsic::tgt_mul24),
-// GISEL: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
+// GBUILTIN: GIM_CheckHasOneUse, /*MI*/1,
+// GCUSTOM: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
// GISEL: GIM_CheckOpcode, /*MI*/1, GIMT_Encode2(MyTarget::G_TGT_MUL24),
-// GISEL: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
+// GBUILTIN: GIM_CheckHasOneUse, /*MI*/1,
+// GCUSTOM: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
// GISEL: GIM_CheckOpcode, /*MI*/1, GIMT_Encode2(MyTarget::G_TGT_MUL24),
-// GISEL: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
+// GBUILTIN: GIM_CheckHasOneUse, /*MI*/1,
+// GCUSTOM: GIM_CheckCxxInsnPredicate, /*MI*/1, /*FnId*/GIMT_Encode2(GICXXPred_MI_Predicate_TGTmul24_oneuse),
def inst_mad24 : I<
(outs GPR32:$dst),
(ins GPR32:$src0, GPR32:$src1, GPR32:$src2),
diff --git a/llvm/test/ThinLTO/X86/funcimport-stats.ll b/llvm/test/ThinLTO/X86/funcimport-stats.ll
index 913b13004c1c..7fcd33855fe1 100644
--- a/llvm/test/ThinLTO/X86/funcimport-stats.ll
+++ b/llvm/test/ThinLTO/X86/funcimport-stats.ll
@@ -9,8 +9,8 @@
; RUN: cat %t4 | grep 'Is importing aliasee' | count 1
; RUN: cat %t4 | FileCheck %s
-; CHECK: - [[NUM_FUNCS:[0-9]+]] functions imported from
-; CHECK-NEXT: - [[NUM_VARS:[0-9]+]] global vars imported from
+; CHECK: - [[NUM_FUNCS:[0-9]+]] function definitions and 0 function declarations imported from
+; CHECK-NEXT: - [[NUM_VARS:[0-9]+]] global vars definition and 0 global vars declaration imported from
; CHECK: [[NUM_FUNCS]] function-import - Number of functions imported in backend
; CHECK-NEXT: [[NUM_FUNCS]] function-import - Number of functions thin link decided to import
diff --git a/llvm/test/ThinLTO/X86/import_callee_declaration.ll b/llvm/test/ThinLTO/X86/import_callee_declaration.ll
new file mode 100644
index 000000000000..43214e3cf941
--- /dev/null
+++ b/llvm/test/ThinLTO/X86/import_callee_declaration.ll
@@ -0,0 +1,181 @@
+; "-debug-only" requires asserts.
+; REQUIRES: asserts
+; RUN: rm -rf %t && split-file %s %t && cd %t
+
+; Generate per-module summaries.
+; RUN: opt -module-summary main.ll -o main.bc
+; RUN: opt -module-summary lib.ll -o lib.bc
+
+; Generate the combined summary and distributed indices.
+
+; - For function import, set 'import-instr-limit' to 7 and fall back to importing
+; function declarations.
+; - In main.ll, function 'main' calls 'small_func' and 'large_func'. Both callees
+; are defined in lib.ll. 'small_func' has two indirect callees, one is smaller
+; and the other one is larger. Both callees of 'small_func' are defined in lib.ll.
+; - Given the import limit, in main's combined summary, the import type of 'small_func'
+; and 'small_indirect_callee' will be 'definition', and the import type of
+; 'large_func' and 'large_indirect_callee' will be 'declaration'.
+;
+; The test will disassemble combined summaries and check that the import type is
+; correct. Right now the postlink optimizer pipeline doesn't do anything with
+; declarations (e.g., import them or de-serialize summary attributes) yet, so
+; there is nothing more to test than the summary content.
+;
+; RUN: llvm-lto2 run \
+; RUN: -debug-only=function-import \
+; RUN: -import-instr-limit=7 \
+; RUN: -import-declaration \
+; RUN: -thinlto-distributed-indexes \
+; RUN: -r=main.bc,main,px \
+; RUN: -r=main.bc,small_func, \
+; RUN: -r=main.bc,large_func, \
+; RUN: -r=lib.bc,callee,pl \
+; RUN: -r=lib.bc,large_indirect_callee,px \
+; RUN: -r=lib.bc,small_func,px \
+; RUN: -r=lib.bc,large_func,px \
+; RUN: -r=lib.bc,large_indirect_callee_alias,px \
+; RUN: -r=lib.bc,calleeAddrs,px -o summary main.bc lib.bc 2>&1 | FileCheck %s --check-prefix=DUMP
+;
+; RUN: llvm-lto -thinlto-action=thinlink -import-declaration -import-instr-limit=7 -o combined.index.bc main.bc lib.bc
+; RUN: llvm-lto -thinlto-action=distributedindexes -debug-only=function-import -import-declaration -import-instr-limit=7 -thinlto-index combined.index.bc main.bc lib.bc 2>&1 | FileCheck %s --check-prefix=DUMP
+
+; DUMP: - 2 function definitions and 3 function declarations imported from lib.bc
+
+; First disassemble the per-module summary and find out the GUIDs for {large_func, large_indirect_callee}.
+;
+; RUN: llvm-dis lib.bc -o - | FileCheck %s --check-prefix=LIB-DIS
+; LIB-DIS: [[LARGEFUNC:\^[0-9]+]] = gv: (name: "large_func", summaries: {{.*}}) ; guid = 2418497564662708935
+; LIB-DIS: [[LARGEINDIRECT:\^[0-9]+]] = gv: (name: "large_indirect_callee", summaries: {{.*}}) ; guid = 14343440786664691134
+; LIB-DIS: [[LARGEINDIRECTALIAS:\^[0-9]+]] = gv: (name: "large_indirect_callee_alias", summaries: {{.*}}, aliasee: [[LARGEINDIRECT]]
+;
+; Second, disassemble main's combined summary and test that large callees are
+; not yet imported as declarations.
+;
+; RUN: llvm-dis main.bc.thinlto.bc -o - | FileCheck %s --check-prefix=MAIN-DIS
+;
+; MAIN-DIS: [[LIBMOD:\^[0-9]+]] = module: (path: "lib.bc", hash: (0, 0, 0, 0, 0))
+; MAIN-DIS-NOT: [[LARGEFUNC:\^[0-9]+]] = gv: (guid: 2418497564662708935, summaries: (function: (module: [[LIBMOD]], flags: ({{.*}} importType: declaration), insts: 8, {{.*}})))
+; MAIN-DIS-NOT: [[LARGEINDIRECT:\^[0-9]+]] = gv: (guid: 14343440786664691134, summaries: (function: (module: [[LIBMOD]], flags: ({{.*}} importType: declaration), insts: 8, {{.*}})))
+; MAIN-DIS-NOT: [[LARGEINDIRECTALIAS:\^[0-9]+]] = gv: (guid: 16730173943625350469, summaries: (alias: (module: [[LIBMOD]], flags: ({{.*}} importType: declaration)
+
+; Run in-process ThinLTO and test that
+; 1. `callee` remains internalized even if the symbols of its callers
+; (large_func and large_indirect_callee) are exported as declarations and visible to the main module.
+; 2. the debugging logs from the `function-import` pass are as expected.
+
+; RUN: llvm-lto2 run \
+; RUN: -debug-only=function-import \
+; RUN: -save-temps \
+; RUN: -thinlto-threads=1 \
+; RUN: -import-instr-limit=7 \
+; RUN: -import-declaration \
+; RUN: -r=main.bc,main,px \
+; RUN: -r=main.bc,small_func, \
+; RUN: -r=main.bc,large_func, \
+; RUN: -r=lib.bc,callee,pl \
+; RUN: -r=lib.bc,large_indirect_callee,px \
+; RUN: -r=lib.bc,small_func,px \
+; RUN: -r=lib.bc,large_func,px \
+; RUN: -r=lib.bc,large_indirect_callee_alias,px \
+; RUN: -r=lib.bc,calleeAddrs,px -o in-process main.bc lib.bc 2>&1 | FileCheck %s --check-prefix=IMPORTDUMP
+
+; Test import status from debugging logs.
+; TODO: Serialize the declaration bit and test that declaration bits are correctly
+; set, and extend this test case to test IR once the postlink optimizer makes use
+; of the import type for declarations.
+; IMPORTDUMP-DAG: Not importing function 11825436545918268459 callee from lib.cc
+; IMPORTDUMP-DAG: Is importing function declaration 14343440786664691134 large_indirect_callee from lib.cc
+; IMPORTDUMP-DAG: Is importing function definition 13568239288960714650 small_indirect_callee from lib.cc
+; IMPORTDUMP-DAG: Is importing function definition 6976996067367342685 small_func from lib.cc
+; IMPORTDUMP-DAG: Is importing function declaration 2418497564662708935 large_func from lib.cc
+; IMPORTDUMP-DAG: Not importing global 7680325410415171624 calleeAddrs from lib.cc
+; IMPORTDUMP-DAG: Is importing alias declaration 16730173943625350469 large_indirect_callee_alias from lib.cc
+
+; RUN: llvm-dis in-process.1.3.import.bc -o - | FileCheck %s --check-prefix=IMPORT
+
+; RUN: llvm-dis in-process.2.2.internalize.bc -o - | FileCheck %s --check-prefix=INTERNALIZE
+
+; IMPORT-DAG: define available_externally void @small_func
+; IMPORT-DAG: define available_externally hidden void @small_indirect_callee
+; IMPORT-DAG: declare void @large_func
+; IMPORT-NOT: large_indirect_callee
+; IMPORT-NOT: large_indirect_callee_alias
+
+; INTERNALIZE: define internal void @callee()
+
+;--- main.ll
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+define i32 @main() {
+ call void @small_func()
+ call void @large_func()
+ ret i32 0
+}
+
+declare void @small_func()
+
+; large_func without attributes
+declare void @large_func()
+
+;--- lib.ll
+source_filename = "lib.cc"
+target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-i128:128-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-unknown-linux-gnu"
+
+@calleeAddrs = global [3 x ptr] [ptr @large_indirect_callee, ptr @small_indirect_callee, ptr @large_indirect_callee_alias]
+
+define void @callee() #1 {
+ ret void
+}
+
+define void @large_indirect_callee()#2 {
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ ret void
+}
+
+define internal void @small_indirect_callee() #0 {
+ ret void
+}
+
+@large_indirect_callee_alias = alias void(), ptr @large_indirect_callee
+
+define void @small_func() {
+entry:
+ %0 = load ptr, ptr @calleeAddrs
+ call void %0(), !prof !0
+ %1 = load ptr, ptr getelementptr inbounds ([3 x ptr], ptr @calleeAddrs, i64 0, i64 1)
+ call void %1(), !prof !1
+ %2 = load ptr, ptr getelementptr inbounds ([3 x ptr], ptr @calleeAddrs, i64 0, i64 2)
+ call void %2(), !prof !2
+ ret void
+}
+
+define void @large_func() #0 {
+entry:
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ call void @callee()
+ ret void
+}
+
+attributes #0 = { nounwind norecurse }
+
+attributes #1 = { noinline }
+
+attributes #2 = { norecurse }
+
+!0 = !{!"VP", i32 0, i64 1, i64 14343440786664691134, i64 1}
+!1 = !{!"VP", i32 0, i64 1, i64 13568239288960714650, i64 1}
+!2 = !{!"VP", i32 0, i64 1, i64 16730173943625350469, i64 1}
diff --git a/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
new file mode 100644
index 000000000000..330c61360e20
--- /dev/null
+++ b/llvm/test/Transforms/Coroutines/coro-lifetime-end.ll
@@ -0,0 +1,142 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes='cgscc(coro-split),simplifycfg,early-cse' -S | FileCheck %s
+
+declare ptr @malloc(i64)
+
+%i8.array = type { [100 x i8] }
+declare void @consume.i8.array(ptr)
+
+@testbool = external local_unnamed_addr global i8, align 1
+
+; testval does not contain an explicit lifetime end. We must assume that it may
+; live across suspension.
+define void @HasNoLifetimeEnd() presplitcoroutine {
+; CHECK-LABEL: define void @HasNoLifetimeEnd() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ID:%.*]] = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr @HasNoLifetimeEnd.resumers)
+; CHECK-NEXT: [[ALLOC:%.*]] = call ptr @malloc(i64 16)
+; CHECK-NEXT: [[VFRAME:%.*]] = call noalias nonnull ptr @llvm.coro.begin(token [[ID]], ptr [[ALLOC]])
+; CHECK-NEXT: store ptr @HasNoLifetimeEnd.resume, ptr [[VFRAME]], align 8
+; CHECK-NEXT: [[DESTROY_ADDR:%.*]] = getelementptr inbounds [[HASNOLIFETIMEEND_FRAME:%.*]], ptr [[VFRAME]], i32 0, i32 1
+; CHECK-NEXT: store ptr @HasNoLifetimeEnd.destroy, ptr [[DESTROY_ADDR]], align 8
+; CHECK-NEXT: [[INDEX_ADDR1:%.*]] = getelementptr inbounds [[HASNOLIFETIMEEND_FRAME]], ptr [[VFRAME]], i32 0, i32 2
+; CHECK-NEXT: call void @consume.i8.array(ptr [[INDEX_ADDR1]])
+; CHECK-NEXT: [[INDEX_ADDR2:%.*]] = getelementptr inbounds [[HASNOLIFETIMEEND_FRAME]], ptr [[VFRAME]], i32 0, i32 3
+; CHECK-NEXT: store i1 false, ptr [[INDEX_ADDR2]], align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %testval = alloca %i8.array
+ %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
+ %alloc = call ptr @malloc(i64 16) #3
+ %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
+
+ call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @consume.i8.array(ptr %testval)
+
+ %save = call token @llvm.coro.save(ptr null)
+ %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
+ switch i8 %suspend, label %exit [
+ i8 0, label %await.ready
+ i8 1, label %exit
+ ]
+await.ready:
+ br label %exit
+exit:
+ call i1 @llvm.coro.end(ptr null, i1 false, token none)
+ ret void
+}
+
+define void @LifetimeEndAfterCoroEnd() presplitcoroutine {
+; CHECK-LABEL: define void @LifetimeEndAfterCoroEnd() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ID:%.*]] = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr @LifetimeEndAfterCoroEnd.resumers)
+; CHECK-NEXT: [[ALLOC:%.*]] = call ptr @malloc(i64 16)
+; CHECK-NEXT: [[VFRAME:%.*]] = call noalias nonnull ptr @llvm.coro.begin(token [[ID]], ptr [[ALLOC]])
+; CHECK-NEXT: store ptr @LifetimeEndAfterCoroEnd.resume, ptr [[VFRAME]], align 8
+; CHECK-NEXT: [[DESTROY_ADDR:%.*]] = getelementptr inbounds [[LIFETIMEENDAFTERCOROEND_FRAME:%.*]], ptr [[VFRAME]], i32 0, i32 1
+; CHECK-NEXT: store ptr @LifetimeEndAfterCoroEnd.destroy, ptr [[DESTROY_ADDR]], align 8
+; CHECK-NEXT: [[INDEX_ADDR1:%.*]] = getelementptr inbounds [[LIFETIMEENDAFTERCOROEND_FRAME]], ptr [[VFRAME]], i32 0, i32 2
+; CHECK-NEXT: call void @consume.i8.array(ptr [[INDEX_ADDR1]])
+; CHECK-NEXT: [[INDEX_ADDR2:%.*]] = getelementptr inbounds [[LIFETIMEENDAFTERCOROEND_FRAME]], ptr [[VFRAME]], i32 0, i32 3
+; CHECK-NEXT: store i1 false, ptr [[INDEX_ADDR2]], align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %testval = alloca %i8.array
+ %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
+ %alloc = call ptr @malloc(i64 16) #3
+ %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
+
+ call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @consume.i8.array(ptr %testval)
+
+ %save = call token @llvm.coro.save(ptr null)
+ %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
+ switch i8 %suspend, label %exit [
+ i8 0, label %await.ready
+ i8 1, label %exit
+ ]
+await.ready:
+ br label %exit
+exit:
+ call i1 @llvm.coro.end(ptr null, i1 false, token none)
+ call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ ret void
+}
+
+define void @BranchWithoutLifetimeEnd() presplitcoroutine {
+; CHECK-LABEL: define void @BranchWithoutLifetimeEnd() {
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[ID:%.*]] = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr @BranchWithoutLifetimeEnd.resumers)
+; CHECK-NEXT: [[ALLOC:%.*]] = call ptr @malloc(i64 16)
+; CHECK-NEXT: [[VFRAME:%.*]] = call noalias nonnull ptr @llvm.coro.begin(token [[ID]], ptr [[ALLOC]])
+; CHECK-NEXT: store ptr @BranchWithoutLifetimeEnd.resume, ptr [[VFRAME]], align 8
+; CHECK-NEXT: [[DESTROY_ADDR:%.*]] = getelementptr inbounds [[BRANCHWITHOUTLIFETIMEEND_FRAME:%.*]], ptr [[VFRAME]], i32 0, i32 1
+; CHECK-NEXT: store ptr @BranchWithoutLifetimeEnd.destroy, ptr [[DESTROY_ADDR]], align 8
+; CHECK-NEXT: [[TESTVAL:%.*]] = getelementptr inbounds [[BRANCHWITHOUTLIFETIMEEND_FRAME]], ptr [[VFRAME]], i32 0, i32 2
+; CHECK-NEXT: call void @consume.i8.array(ptr [[TESTVAL]])
+; CHECK-NEXT: [[TMP0:%.*]] = load i8, ptr @testbool, align 1
+; CHECK-NEXT: [[INDEX_ADDR1:%.*]] = getelementptr inbounds [[BRANCHWITHOUTLIFETIMEEND_FRAME]], ptr [[VFRAME]], i32 0, i32 3
+; CHECK-NEXT: store i1 false, ptr [[INDEX_ADDR1]], align 1
+; CHECK-NEXT: ret void
+;
+entry:
+ %testval = alloca %i8.array
+ %id = call token @llvm.coro.id(i32 0, ptr null, ptr null, ptr null)
+ %alloc = call ptr @malloc(i64 16) #3
+ %vFrame = call noalias nonnull ptr @llvm.coro.begin(token %id, ptr %alloc)
+
+ call void @llvm.lifetime.start.p0(i64 100, ptr %testval)
+ call void @consume.i8.array(ptr %testval)
+
+ %0 = load i8, ptr @testbool, align 1
+ %tobool = trunc nuw i8 %0 to i1
+ br i1 %tobool, label %if.then, label %if.end
+
+if.then:
+ call void @llvm.lifetime.end.p0(i64 100, ptr %testval)
+ br label %if.end
+
+if.end:
+ %save = call token @llvm.coro.save(ptr null)
+ %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
+ switch i8 %suspend, label %exit [
+ i8 0, label %await.ready
+ i8 1, label %exit
+ ]
+await.ready:
+ br label %exit
+exit:
+ call i1 @llvm.coro.end(ptr null, i1 false, token none)
+ ret void
+}
+
+
+declare token @llvm.coro.id(i32, ptr readnone, ptr nocapture readonly, ptr)
+declare ptr @llvm.coro.begin(token, ptr writeonly) #3
+declare ptr @llvm.coro.frame() #5
+declare i8 @llvm.coro.suspend(token, i1) #3
+declare i1 @llvm.coro.end(ptr, i1, token) #3
+declare void @llvm.lifetime.start.p0(i64, ptr nocapture) #4
+declare void @llvm.lifetime.end.p0(i64, ptr nocapture) #4
diff --git a/llvm/test/Transforms/DivRemPairs/AMDGPU/div-rem-pairs.ll b/llvm/test/Transforms/DivRemPairs/AMDGPU/div-rem-pairs.ll
new file mode 100644
index 000000000000..d01ded9ebbfd
--- /dev/null
+++ b/llvm/test/Transforms/DivRemPairs/AMDGPU/div-rem-pairs.ll
@@ -0,0 +1,129 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=div-rem-pairs -S -mtriple=amdgcn-amd-amdhsa | FileCheck %s
+
+define i32 @basic(ptr %p, i32 %x, i32 %y) {
+; CHECK-LABEL: define i32 @basic(
+; CHECK-SAME: ptr [[P:%.*]], i32 [[X:%.*]], i32 [[Y:%.*]]) {
+; CHECK-NEXT: [[X_FROZEN:%.*]] = freeze i32 [[X]]
+; CHECK-NEXT: [[Y_FROZEN:%.*]] = freeze i32 [[Y]]
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[X_FROZEN]], [[Y_FROZEN]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y_FROZEN]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X_FROZEN]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %div = udiv i32 %x, %y
+ %rem = urem i32 %x, %y
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
+
+define i32 @no_freezes(ptr %p, i32 noundef %x, i32 noundef %y) {
+; CHECK-LABEL: define i32 @no_freezes(
+; CHECK-SAME: ptr [[P:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[X]], [[Y]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %div = udiv i32 %x, %y
+ %rem = urem i32 %x, %y
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
+
+define i32 @poison_does_not_freeze(ptr %p, i32 noundef %x, i32 noundef %y) {
+; CHECK-LABEL: define i32 @poison_does_not_freeze(
+; CHECK-SAME: ptr [[P:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[X2:%.*]] = shl nuw nsw i32 [[X]], 5
+; CHECK-NEXT: [[Y2:%.*]] = add nuw nsw i32 [[Y]], 1
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[X2]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y2]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X2]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %x2 = shl nuw nsw i32 %x, 5
+ %y2 = add nuw nsw i32 %y, 1
+ %div = udiv i32 %x2, %y2
+ %rem = urem i32 %x2, %y2
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
+
+define i32 @poison_does_not_freeze_signed(ptr %p, i32 noundef %x, i32 noundef %y) {
+; CHECK-LABEL: define i32 @poison_does_not_freeze_signed(
+; CHECK-SAME: ptr [[P:%.*]], i32 noundef [[X:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[X2:%.*]] = shl nuw nsw i32 [[X]], 5
+; CHECK-NEXT: [[Y2:%.*]] = add nuw nsw i32 [[Y]], 1
+; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[X2]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y2]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X2]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %x2 = shl nuw nsw i32 %x, 5
+ %y2 = add nuw nsw i32 %y, 1
+ %div = sdiv i32 %x2, %y2
+ %rem = srem i32 %x2, %y2
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
+
+define <4 x i8> @poison_does_not_freeze_vector(ptr %p, <4 x i8> noundef %x, <4 x i8> noundef %y) {
+; CHECK-LABEL: define <4 x i8> @poison_does_not_freeze_vector(
+; CHECK-SAME: ptr [[P:%.*]], <4 x i8> noundef [[X:%.*]], <4 x i8> noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[X2:%.*]] = shl nuw nsw <4 x i8> [[X]], <i8 5, i8 5, i8 5, i8 5>
+; CHECK-NEXT: [[Y2:%.*]] = add nuw nsw <4 x i8> [[Y]], <i8 1, i8 1, i8 1, i8 1>
+; CHECK-NEXT: [[DIV:%.*]] = udiv <4 x i8> [[X2]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul <4 x i8> [[DIV]], [[Y2]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub <4 x i8> [[X2]], [[TMP1]]
+; CHECK-NEXT: store <4 x i8> [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret <4 x i8> [[REM_DECOMPOSED]]
+;
+ %x2 = shl nuw nsw <4 x i8> %x, <i8 5, i8 5, i8 5, i8 5>
+ %y2 = add nuw nsw <4 x i8> %y, <i8 1, i8 1, i8 1, i8 1>
+ %div = udiv <4 x i8> %x2, %y2
+ %rem = urem <4 x i8> %x2, %y2
+ store <4 x i8> %div, ptr %p, align 4
+ ret <4 x i8> %rem
+}
+
+define i32 @explicit_poison_does_not_freeze(ptr %p, i32 noundef %y) {
+; CHECK-LABEL: define i32 @explicit_poison_does_not_freeze(
+; CHECK-SAME: ptr [[P:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[X:%.*]] = add i32 poison, 1
+; CHECK-NEXT: [[Y2:%.*]] = add nuw nsw i32 [[Y]], 1
+; CHECK-NEXT: [[DIV:%.*]] = udiv i32 [[X]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y2]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %x = add i32 poison, 1
+ %y2 = add nuw nsw i32 %y, 1
+ %div = udiv i32 %x, %y2
+ %rem = urem i32 %x, %y2
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
+
+define i32 @explicit_poison_does_not_freeze_signed(ptr %p, i32 noundef %y) {
+; CHECK-LABEL: define i32 @explicit_poison_does_not_freeze_signed(
+; CHECK-SAME: ptr [[P:%.*]], i32 noundef [[Y:%.*]]) {
+; CHECK-NEXT: [[X:%.*]] = add i32 poison, 1
+; CHECK-NEXT: [[Y2:%.*]] = add nuw nsw i32 [[Y]], 1
+; CHECK-NEXT: [[DIV:%.*]] = sdiv i32 [[X]], [[Y2]]
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[DIV]], [[Y2]]
+; CHECK-NEXT: [[REM_DECOMPOSED:%.*]] = sub i32 [[X]], [[TMP1]]
+; CHECK-NEXT: store i32 [[DIV]], ptr [[P]], align 4
+; CHECK-NEXT: ret i32 [[REM_DECOMPOSED]]
+;
+ %x = add i32 poison, 1
+ %y2 = add nuw nsw i32 %y, 1
+ %div = sdiv i32 %x, %y2
+ %rem = srem i32 %x, %y2
+ store i32 %div, ptr %p, align 4
+ ret i32 %rem
+}
diff --git a/llvm/test/Transforms/DivRemPairs/AMDGPU/lit.local.cfg b/llvm/test/Transforms/DivRemPairs/AMDGPU/lit.local.cfg
new file mode 100644
index 000000000000..7c492428aec7
--- /dev/null
+++ b/llvm/test/Transforms/DivRemPairs/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if not "AMDGPU" in config.root.targets:
+ config.unsupported = True
diff --git a/llvm/test/Transforms/FunctionImport/funcimport.ll b/llvm/test/Transforms/FunctionImport/funcimport.ll
index a0968a67f5ce..635750b33fff 100644
--- a/llvm/test/Transforms/FunctionImport/funcimport.ll
+++ b/llvm/test/Transforms/FunctionImport/funcimport.ll
@@ -166,7 +166,8 @@ declare void @variadic_va_start(...)
; GUID-DAG: GUID {{.*}} is linkoncefunc
; DUMP: Module [[M1:.*]] imports from 1 module
-; DUMP-NEXT: 15 functions imported from [[M2:.*]]
-; DUMP-NEXT: 4 vars imported from [[M2]]
+; DUMP-NEXT: 15 function definitions and 0 function declarations imported from [[M2:.*]]
+; DUMP-NEXT: 4 var definitions and 0 var declarations imported from [[M2]]
+
; DUMP: Imported 15 functions for Module [[M1]]
; DUMP-NEXT: Imported 4 global variables for Module [[M1]]
diff --git a/llvm/test/Transforms/InstCombine/fma.ll b/llvm/test/Transforms/InstCombine/fma.ll
index cf3d7f3c525a..b88250d43428 100644
--- a/llvm/test/Transforms/InstCombine/fma.ll
+++ b/llvm/test/Transforms/InstCombine/fma.ll
@@ -194,8 +194,7 @@ define float @fmuladd_unary_fneg_x_unary_fneg_y(float %x, float %y, float %z) {
define float @fmuladd_fneg_x_fneg_y_fast(float %x, float %y, float %z) {
; CHECK-LABEL: @fmuladd_fneg_x_fneg_y_fast(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[FMULADD:%.*]] = fadd fast float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[FMULADD:%.*]] = call fast float @llvm.fmuladd.f32(float [[X:%.*]], float [[Y:%.*]], float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%x.fneg = fsub float -0.0, %x
@@ -204,10 +203,27 @@ define float @fmuladd_fneg_x_fneg_y_fast(float %x, float %y, float %z) {
ret float %fmuladd
}
+define float @fmuladd_unfold(float %x, float %y, float %z) {
+; CHECK-LABEL: @fmuladd_unfold(
+; CHECK-NEXT: [[FMULADD:%.*]] = call reassoc contract float @llvm.fmuladd.f32(float [[X:%.*]], float [[Y:%.*]], float [[Z:%.*]])
+; CHECK-NEXT: ret float [[FMULADD]]
+;
+ %fmuladd = call reassoc contract float @llvm.fmuladd.f32(float %x, float %y, float %z)
+ ret float %fmuladd
+}
+
+define <8 x half> @fmuladd_unfold_vec(<8 x half> %x, <8 x half> %y, <8 x half> %z) {
+; CHECK-LABEL: @fmuladd_unfold_vec(
+; CHECK-NEXT: [[FMULADD:%.*]] = call reassoc contract <8 x half> @llvm.fmuladd.v8f16(<8 x half> [[X:%.*]], <8 x half> [[Y:%.*]], <8 x half> [[Z:%.*]])
+; CHECK-NEXT: ret <8 x half> [[FMULADD]]
+;
+ %fmuladd = call reassoc contract <8 x half> @llvm.fmuladd.v8f16(<8 x half> %x, <8 x half> %y, <8 x half> %z)
+ ret <8 x half> %fmuladd
+}
+
define float @fmuladd_unary_fneg_x_unary_fneg_y_fast(float %x, float %y, float %z) {
; CHECK-LABEL: @fmuladd_unary_fneg_x_unary_fneg_y_fast(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: [[FMULADD:%.*]] = fadd fast float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[FMULADD:%.*]] = call fast float @llvm.fmuladd.f32(float [[X:%.*]], float [[Y:%.*]], float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%x.fneg = fneg float %x
@@ -285,8 +301,7 @@ define float @fmuladd_fabs_x_fabs_x(float %x, float %z) {
define float @fmuladd_fabs_x_fabs_x_fast(float %x, float %z) {
; CHECK-LABEL: @fmuladd_fabs_x_fabs_x_fast(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[X:%.*]], [[X]]
-; CHECK-NEXT: [[FMULADD:%.*]] = fadd fast float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[FMULADD:%.*]] = call fast float @llvm.fmuladd.f32(float [[X:%.*]], float [[X]], float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%x.fabs = call float @llvm.fabs.f32(float %x)
@@ -312,10 +327,10 @@ define float @fma_k_y_z_fast(float %y, float %z) {
ret float %fma
}
+; Treat fmuladd like an fma intrinsic
define float @fmuladd_k_y_z_fast(float %y, float %z) {
; CHECK-LABEL: @fmuladd_k_y_z_fast(
-; CHECK-NEXT: [[TMP1:%.*]] = fmul fast float [[Y:%.*]], 4.000000e+00
-; CHECK-NEXT: [[FMULADD:%.*]] = fadd fast float [[TMP1]], [[Z:%.*]]
+; CHECK-NEXT: [[FMULADD:%.*]] = call fast float @llvm.fmuladd.f32(float [[Y:%.*]], float 4.000000e+00, float [[Z:%.*]])
; CHECK-NEXT: ret float [[FMULADD]]
;
%fmuladd = call fast float @llvm.fmuladd.f32(float 4.0, float %y, float %z)
diff --git a/llvm/test/Transforms/InstCombine/getelementptr.ll b/llvm/test/Transforms/InstCombine/getelementptr.ll
index 307ed8d2b02b..e82c168ced01 100644
--- a/llvm/test/Transforms/InstCombine/getelementptr.ll
+++ b/llvm/test/Transforms/InstCombine/getelementptr.ll
@@ -267,8 +267,8 @@ define <2 x i1> @test13_fixed_scalable(i64 %X, ptr %P, <2 x i64> %y) nounwind {
; CHECK-NEXT: [[TMP2:%.*]] = call i64 @llvm.vscale.i64()
; CHECK-NEXT: [[TMP3:%.*]] = shl i64 [[TMP2]], 4
; CHECK-NEXT: [[DOTSPLATINSERT1:%.*]] = insertelement <2 x i64> poison, i64 [[TMP3]], i64 0
-; CHECK-NEXT: [[DOTSPLAT2:%.*]] = shufflevector <2 x i64> [[DOTSPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
-; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <2 x i64> [[DOTSPLAT2]], [[Y:%.*]]
+; CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <2 x i64> [[DOTSPLATINSERT1]], <2 x i64> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[B_IDX:%.*]] = mul nsw <2 x i64> [[DOTSPLAT]], [[Y:%.*]]
; CHECK-NEXT: [[C:%.*]] = icmp eq <2 x i64> [[A_IDX]], [[B_IDX]]
; CHECK-NEXT: ret <2 x i1> [[C]]
;
diff --git a/llvm/test/Transforms/InstCombine/known-bits.ll b/llvm/test/Transforms/InstCombine/known-bits.ll
index 82cd24027e4e..41b16f3333c1 100644
--- a/llvm/test/Transforms/InstCombine/known-bits.ll
+++ b/llvm/test/Transforms/InstCombine/known-bits.ll
@@ -1698,6 +1698,44 @@ define i32 @test_none(float nofpclass(all) %x) {
ret i32 %and
}
+; We cannot make assumptions about the sign of the result of sqrt
+; when the input is a negative value (except for -0).
+define i1 @pr92217() {
+; CHECK-LABEL: @pr92217(
+; CHECK-NEXT: [[X:%.*]] = call float @llvm.sqrt.f32(float 0xC6DEBE9E60000000)
+; CHECK-NEXT: [[Y:%.*]] = bitcast float [[X]] to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[Y]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %x = call float @llvm.sqrt.f32(float 0xC6DEBE9E60000000)
+ %y = bitcast float %x to i32
+ %cmp = icmp slt i32 %y, 0
+ ret i1 %cmp
+}
+
+define i1 @sqrt_negative_input(float nofpclass(nan zero pnorm psub pinf) %a) {
+; CHECK-LABEL: @sqrt_negative_input(
+; CHECK-NEXT: [[X:%.*]] = call float @llvm.sqrt.f32(float [[A:%.*]])
+; CHECK-NEXT: [[Y:%.*]] = bitcast float [[X]] to i32
+; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[Y]], 0
+; CHECK-NEXT: ret i1 [[CMP]]
+;
+ %x = call float @llvm.sqrt.f32(float %a)
+ %y = bitcast float %x to i32
+ %cmp = icmp slt i32 %y, 0
+ ret i1 %cmp
+}
+
+define i1 @sqrt_negative_input_nnan(float nofpclass(nan zero pnorm psub pinf) %a) {
+; CHECK-LABEL: @sqrt_negative_input_nnan(
+; CHECK-NEXT: ret i1 false
+;
+ %x = call nnan float @llvm.sqrt.f32(float %a)
+ %y = bitcast float %x to i32
+ %cmp = icmp slt i32 %y, 0
+ ret i1 %cmp
+}
+
define i8 @test_icmp_add(i8 %n, i8 %n2, i8 %other) {
; CHECK-LABEL: @test_icmp_add(
; CHECK-NEXT: entry:
diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
index ef085d3e7b50..a9cdc8bd2024 100644
--- a/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
+++ b/llvm/test/Transforms/InstCombine/vec_shuffle-inseltpoison.ll
@@ -92,6 +92,23 @@ define <4 x float> @test8(<4 x float> %x, <4 x float> %y) {
%t2 = extractelement <4 x float> %x, i32 3
%t1 = extractelement <4 x float> %y, i32 0
%t128 = insertelement <4 x float> poison, float %t4, i32 0
+ %t130 = insertelement <4 x float> %t128, float poison, i32 1
+ %t132 = insertelement <4 x float> %t130, float %t2, i32 2
+ %t134 = insertelement <4 x float> %t132, float %t1, i32 3
+ ret <4 x float> %t134
+}
+
+; This shouldn't turn into a single shuffle
+define <4 x float> @test8_undef(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: @test8_undef(
+; CHECK-NEXT: [[T132:%.*]] = shufflevector <4 x float> [[X:%.*]], <4 x float> <float poison, float undef, float poison, float poison>, <4 x i32> <i32 1, i32 5, i32 3, i32 poison>
+; CHECK-NEXT: [[T134:%.*]] = shufflevector <4 x float> [[T132]], <4 x float> [[Y:%.*]], <4 x i32> <i32 0, i32 1, i32 2, i32 4>
+; CHECK-NEXT: ret <4 x float> [[T134]]
+;
+ %t4 = extractelement <4 x float> %x, i32 1
+ %t2 = extractelement <4 x float> %x, i32 3
+ %t1 = extractelement <4 x float> %y, i32 0
+ %t128 = insertelement <4 x float> poison, float %t4, i32 0
%t130 = insertelement <4 x float> %t128, float undef, i32 1
%t132 = insertelement <4 x float> %t130, float %t2, i32 2
%t134 = insertelement <4 x float> %t132, float %t1, i32 3
diff --git a/llvm/test/Transforms/InstCombine/vec_shuffle.ll b/llvm/test/Transforms/InstCombine/vec_shuffle.ll
index 919e30f672e4..8c91efb473fa 100644
--- a/llvm/test/Transforms/InstCombine/vec_shuffle.ll
+++ b/llvm/test/Transforms/InstCombine/vec_shuffle.ll
@@ -88,10 +88,11 @@ define <4 x float> @test7(<4 x float> %x) {
ret <4 x float> %r
}
-; This should turn into a single shuffle.
+; This should not turn into a single shuffle.
define <4 x float> @test8(<4 x float> %x, <4 x float> %y) {
; CHECK-LABEL: @test8(
-; CHECK-NEXT: [[T134:%.*]] = shufflevector <4 x float> [[X:%.*]], <4 x float> [[Y:%.*]], <4 x i32> <i32 1, i32 poison, i32 3, i32 4>
+; CHECK-NEXT: [[T132:%.*]] = shufflevector <4 x float> [[X:%.*]], <4 x float> <float poison, float undef, float poison, float poison>, <4 x i32> <i32 1, i32 5, i32 3, i32 poison>
+; CHECK-NEXT: [[T134:%.*]] = shufflevector <4 x float> [[T132]], <4 x float> [[Y:%.*]], <4 x i32> <i32 0, i32 1, i32 2, i32 4>
; CHECK-NEXT: ret <4 x float> [[T134]]
;
%t4 = extractelement <4 x float> %x, i32 1
@@ -1280,6 +1281,17 @@ define <2 x float> @fneg(<2 x float> %x) {
ret <2 x float> %r
}
+define <2 x float> @fneg_not_single_source(<2 x float> %x) {
+; CHECK-LABEL: @fneg_not_single_source(
+; CHECK-NEXT: [[NEG:%.*]] = fneg <2 x float> [[X:%.*]]
+; CHECK-NEXT: [[SPLAT1:%.*]] = insertelement <2 x float> [[NEG]], float undef, i64 1
+; CHECK-NEXT: ret <2 x float> [[SPLAT1]]
+;
+ %neg = fneg <2 x float> %x
+ %splat = shufflevector <2 x float> %neg, <2 x float> undef, <2 x i32> <i32 0, i32 2>
+ ret <2 x float> %splat
+}
+
define <2 x float> @fmul_splat_constant(<2 x float> %x) {
; CHECK-LABEL: @fmul_splat_constant(
; CHECK-NEXT: [[TMP1:%.*]] = fmul <2 x float> [[X:%.*]], <float 4.200000e+01, float poison>
@@ -2330,3 +2342,38 @@ define <4 x i16> @blend_elements_from_load(ptr align 8 %_0) {
%rv = shufflevector <3 x i16> <i16 0, i16 undef, i16 undef>, <3 x i16> %load, <4 x i32> <i32 0, i32 1, i32 3, i32 5>
ret <4 x i16> %rv
}
+
+define i16 @pr92887(<2 x i16> %v) {
+; CHECK-LABEL: @pr92887(
+; CHECK-NEXT: ret i16 undef
+;
+ %v0 = extractelement <2 x i16> %v, i64 0
+ %v0lo = and i16 %v0, 1
+ %v1 = extractelement <2 x i16> %v, i64 1
+ %v1lo = and i16 %v1, 1
+ %ins1 = insertelement <4 x i16> poison, i16 %v0lo, i64 0
+ %ins2 = insertelement <4 x i16> %ins1, i16 %v1lo, i64 1
+ %shuf = shufflevector <4 x i16> %ins2, <4 x i16> <i16 poison, i16 poison, i16 undef, i16 undef>, <4 x i32> <i32 0, i32 1, i32 6, i32 7>
+ %extract = extractelement <4 x i16> %shuf, i32 2
+ ret i16 %extract
+}
+
+define <2 x i32> @not_splat_shuffle1(i32 %x) {
+; CHECK-LABEL: @not_splat_shuffle1(
+; CHECK-NEXT: [[SHUF:%.*]] = insertelement <2 x i32> <i32 poison, i32 undef>, i32 [[X:%.*]], i64 0
+; CHECK-NEXT: ret <2 x i32> [[SHUF]]
+;
+ %vec = insertelement <2 x i32> undef, i32 %x, i32 1
+ %shuf = shufflevector <2 x i32> %vec, <2 x i32> poison, <2 x i32> <i32 1, i32 0>
+ ret <2 x i32> %shuf
+}
+
+define <2 x i32> @not_splat_shuffle2(i32 %x) {
+; CHECK-LABEL: @not_splat_shuffle2(
+; CHECK-NEXT: [[SHUF:%.*]] = insertelement <2 x i32> <i32 poison, i32 undef>, i32 [[X:%.*]], i64 0
+; CHECK-NEXT: ret <2 x i32> [[SHUF]]
+;
+ %vec = insertelement <2 x i32> poison, i32 %x, i32 1
+ %shuf = shufflevector <2 x i32> %vec, <2 x i32> undef, <2 x i32> <i32 1, i32 3>
+ ret <2 x i32> %shuf
+}
diff --git a/llvm/test/Transforms/InstSimplify/known-non-zero.ll b/llvm/test/Transforms/InstSimplify/known-non-zero.ll
index fd2862eb04a2..965c333d306d 100644
--- a/llvm/test/Transforms/InstSimplify/known-non-zero.ll
+++ b/llvm/test/Transforms/InstSimplify/known-non-zero.ll
@@ -400,3 +400,186 @@ define i1 @nonzero_reduce_or_fail(<2 x i8> %xx) {
%r = icmp eq i8 %v, 0
ret i1 %r
}
+
+define i1 @src_x_add_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_add_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = zext i1 %x_eq_0 to i8
+ %v = add i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_add_x_eq_1_fail(i8 %x) {
+; CHECK-LABEL: @src_x_add_x_eq_1_fail(
+; CHECK-NEXT: [[X_EQ_1:%.*]] = icmp eq i8 [[X:%.*]], 1
+; CHECK-NEXT: [[Y:%.*]] = zext i1 [[X_EQ_1]] to i8
+; CHECK-NEXT: [[V:%.*]] = add i8 [[X]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x_eq_1 = icmp eq i8 %x, 1
+ %y = zext i1 %x_eq_1 to i8
+ %v = add i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_or_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_or_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = sext i1 %x_eq_0 to i8
+ %v = or i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_or_x_sle_0_fail(i8 %x) {
+; CHECK-LABEL: @src_x_or_x_sle_0_fail(
+; CHECK-NEXT: [[X_EQ_0:%.*]] = icmp sle i8 [[X:%.*]], 0
+; CHECK-NEXT: [[Y:%.*]] = sext i1 [[X_EQ_0]] to i8
+; CHECK-NEXT: [[V:%.*]] = or i8 [[X]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x_eq_0 = icmp sle i8 %x, 0
+ %y = sext i1 %x_eq_0 to i8
+ %v = or i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_xor_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_xor_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = zext i1 %x_eq_0 to i8
+ %v = xor i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_xor_x_ne_0_fail(i8 %x) {
+; CHECK-LABEL: @src_x_xor_x_ne_0_fail(
+; CHECK-NEXT: [[X_NE_0:%.*]] = icmp ne i8 [[X:%.*]], 0
+; CHECK-NEXT: [[Y:%.*]] = zext i1 [[X_NE_0]] to i8
+; CHECK-NEXT: [[V:%.*]] = xor i8 [[X]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x_ne_0 = icmp ne i8 %x, 0
+ %y = zext i1 %x_ne_0 to i8
+ %v = xor i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_sub0_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_sub0_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = sext i1 %x_eq_0 to i8
+ %v = sub i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_sub0_z_eq_0_fail(i8 %x, i8 %z) {
+; CHECK-LABEL: @src_x_sub0_z_eq_0_fail(
+; CHECK-NEXT: [[Z_EQ_0:%.*]] = icmp eq i8 [[Z:%.*]], 0
+; CHECK-NEXT: [[Y:%.*]] = sext i1 [[Z_EQ_0]] to i8
+; CHECK-NEXT: [[V:%.*]] = sub i8 [[X:%.*]], [[Y]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %z_eq_0 = icmp eq i8 %z, 0
+ %y = sext i1 %z_eq_0 to i8
+ %v = sub i8 %x, %y
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_sub1_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_sub1_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = zext i1 %x_eq_0 to i8
+ %v = sub i8 %y, %x
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_sub1_x_eq_0_or_fail(i8 %x, i1 %c1) {
+; CHECK-LABEL: @src_x_sub1_x_eq_0_or_fail(
+; CHECK-NEXT: [[X_EQ_0:%.*]] = icmp eq i8 [[X:%.*]], 0
+; CHECK-NEXT: [[X_EQ_0_OR:%.*]] = or i1 [[X_EQ_0]], [[C1:%.*]]
+; CHECK-NEXT: [[Y:%.*]] = zext i1 [[X_EQ_0_OR]] to i8
+; CHECK-NEXT: [[V:%.*]] = sub i8 [[Y]], [[X]]
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %x_eq_0_or = or i1 %x_eq_0, %c1
+ %y = zext i1 %x_eq_0_or to i8
+ %v = sub i8 %y, %x
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_umax_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_umax_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = sext i1 %x_eq_0 to i8
+ %v = call i8 @llvm.umax.i8(i8 %y, i8 %x)
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_umax_x_ugt_10_fail(i8 %x) {
+; CHECK-LABEL: @src_x_umax_x_ugt_10_fail(
+; CHECK-NEXT: [[X_UGT_10:%.*]] = icmp ugt i8 [[X:%.*]], 10
+; CHECK-NEXT: [[Y:%.*]] = sext i1 [[X_UGT_10]] to i8
+; CHECK-NEXT: [[V:%.*]] = call i8 @llvm.umax.i8(i8 [[Y]], i8 [[X]])
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %x_ugt_10 = icmp ugt i8 %x, 10
+ %y = sext i1 %x_ugt_10 to i8
+ %v = call i8 @llvm.umax.i8(i8 %y, i8 %x)
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_uadd.sat_x_eq_0(i8 %x) {
+; CHECK-LABEL: @src_x_uadd.sat_x_eq_0(
+; CHECK-NEXT: ret i1 false
+;
+ %x_eq_0 = icmp eq i8 %x, 0
+ %y = zext i1 %x_eq_0 to i8
+ %v = call i8 @llvm.uadd.sat.i8(i8 %y, i8 %x)
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
+define i1 @src_x_uadd.sat_c1_fail(i8 %x, i1 %c1) {
+; CHECK-LABEL: @src_x_uadd.sat_c1_fail(
+; CHECK-NEXT: [[Y:%.*]] = zext i1 [[C1:%.*]] to i8
+; CHECK-NEXT: [[V:%.*]] = call i8 @llvm.uadd.sat.i8(i8 [[Y]], i8 [[X:%.*]])
+; CHECK-NEXT: [[R:%.*]] = icmp eq i8 [[V]], 0
+; CHECK-NEXT: ret i1 [[R]]
+;
+ %y = zext i1 %c1 to i8
+ %v = call i8 @llvm.uadd.sat.i8(i8 %y, i8 %x)
+ %r = icmp eq i8 %v, 0
+ ret i1 %r
+}
+
diff --git a/llvm/test/Transforms/InstSimplify/shufflevector.ll b/llvm/test/Transforms/InstSimplify/shufflevector.ll
index 64087194b0d1..201950516160 100644
--- a/llvm/test/Transforms/InstSimplify/shufflevector.ll
+++ b/llvm/test/Transforms/InstSimplify/shufflevector.ll
@@ -337,174 +337,3 @@ define <4 x i32> @not_fold_identity2(<4 x i32> %x) {
%revshuf = shufflevector <4 x i32> %shuf, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
ret <4 x i32> %revshuf
}
-
-define <4 x i64> @fold_lookthrough_cast(<4 x i32> %x) {
-; CHECK-LABEL: @fold_lookthrough_cast(
-; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[SHUF]] to <4 x i64>
-; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ZEXT]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
-;
- %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %zext = zext <4 x i32> %shuf to <4 x i64>
- %revshuf = shufflevector <4 x i64> %zext, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- ret <4 x i64> %revshuf
-}
-
-define <4 x i64> @not_fold_lookthrough_cast(<4 x i32> %x) {
-; CHECK-LABEL: @not_fold_lookthrough_cast(
-; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[X:%.*]] to <4 x i64>
-; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ZEXT]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
-;
- %zext = zext <4 x i32> %x to <4 x i64>
- %revshuf = shufflevector <4 x i64> %zext, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- ret <4 x i64> %revshuf
-}
-
-define <4 x i64> @not_fold_lookthrough_cast2(<4 x i32> %x) {
-; CHECK-LABEL: @not_fold_lookthrough_cast2(
-; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[SHUF]] to <4 x i64>
-; CHECK-NEXT: ret <4 x i64> [[ZEXT]]
-;
- %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %zext = zext <4 x i32> %shuf to <4 x i64>
- ret <4 x i64> %zext
-}
-
-define i32 @not_fold_lookthrough_bitcast(<4 x i8> %x) {
-; CHECK-LABEL: @not_fold_lookthrough_bitcast(
-; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i8> [[X:%.*]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[BITCAST:%.*]] = bitcast <4 x i8> [[SHUF]] to i32
-; CHECK-NEXT: ret i32 [[BITCAST]]
-;
- %shuf = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %bitcast = bitcast <4 x i8> %shuf to i32
- ret i32 %bitcast
-}
-
-define <8 x i16> @not_fold_lookthrough_bitcast2(<4 x i32> %x, <8 x i16> %y) {
-; CHECK-LABEL: @not_fold_lookthrough_bitcast2(
-; CHECK-NEXT: [[CAST:%.*]] = bitcast <4 x i32> [[X:%.*]] to <8 x i16>
-; CHECK-NEXT: [[OUT:%.*]] = shufflevector <8 x i16> [[Y:%.*]], <8 x i16> [[CAST]], <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
-; CHECK-NEXT: ret <8 x i16> [[OUT]]
-;
- %cast = bitcast <4 x i32> %x to <8 x i16>
- %out = shufflevector <8 x i16> %y, <8 x i16> %cast, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
- ret <8 x i16> %out
-}
-
-define <4 x i32> @fold_lookthrough_binop_same_operands(<4 x i32> %x) {
-; CHECK-LABEL: @fold_lookthrough_binop_same_operands(
-; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[ADD:%.*]] = add <4 x i32> [[SHUF]], [[SHUF]]
-; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[ADD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: ret <4 x i32> [[REVSHUF]]
-;
- %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %add = add <4 x i32> %shuf, %shuf
- %revshuf = shufflevector <4 x i32> %add, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- ret <4 x i32> %revshuf
-}
-
-define <4 x i32> @fold_lookthrough_binop_different_operands(<4 x i32> %x, <4 x i32> %y) {
-; CHECK-LABEL: @fold_lookthrough_binop_different_operands(
-; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[ADD:%.*]] = add <4 x i32> [[SHUF]], [[Y:%.*]]
-; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[ADD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: ret <4 x i32> [[REVSHUF]]
-;
- %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %add = add <4 x i32> %shuf, %y
- %revshuf = shufflevector <4 x i32> %add, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- ret <4 x i32> %revshuf
-}
-
-define <4 x i32> @fold_lookthrough_binop_multiuse(<4 x i32> %x) {
-; CHECK-LABEL: @fold_lookthrough_binop_multiuse(
-; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[ADD:%.*]] = add <4 x i32> [[SHUF]], [[SHUF]]
-; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[ADD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[ADD2:%.*]] = add <4 x i32> [[SHUF]], [[REVSHUF]]
-; CHECK-NEXT: ret <4 x i32> [[ADD2]]
-;
- %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %add = add <4 x i32> %shuf, %shuf
- %revshuf = shufflevector <4 x i32> %add, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %add2 = add <4 x i32> %shuf, %revshuf
- ret <4 x i32> %add2
-}
-
-define <4 x i64> @fold_lookthrough_cast_chain(<4 x i16> %x) {
-; CHECK-LABEL: @fold_lookthrough_cast_chain(
-; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i16> [[X:%.*]], <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i16> [[SHUF]] to <4 x i32>
-; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i32> [[ZEXT]] to <4 x i64>
-; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[SEXT]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
-;
- %shuf = shufflevector <4 x i16> %x, <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %zext = zext <4 x i16> %shuf to <4 x i32>
- %sext = sext <4 x i32> %zext to <4 x i64>
- %revshuf = shufflevector <4 x i64> %sext, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- ret <4 x i64> %revshuf
-}
-
-define <4 x i32> @fold_lookthrough_binop_chain(<4 x i32> %x) {
-; CHECK-LABEL: @fold_lookthrough_binop_chain(
-; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[ADD:%.*]] = add <4 x i32> [[SHUF]], [[SHUF]]
-; CHECK-NEXT: [[ADD2:%.*]] = add <4 x i32> [[ADD]], [[ADD]]
-; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[ADD2]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: ret <4 x i32> [[REVSHUF]]
-;
- %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %add = add <4 x i32> %shuf, %shuf
- %add2 = add <4 x i32> %add, %add
- %revshuf = shufflevector <4 x i32> %add2, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- ret <4 x i32> %revshuf
-}
-
-define <4 x i64> @fold_lookthrough_cast_binop_chain(<4 x i32> %x) {
-; CHECK-LABEL: @fold_lookthrough_cast_binop_chain(
-; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[SHUF]] to <4 x i64>
-; CHECK-NEXT: [[ADD:%.*]] = add <4 x i64> [[ZEXT]], [[ZEXT]]
-; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ADD]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
-; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
-;
- %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- %zext = zext <4 x i32> %shuf to <4 x i64>
- %add = add <4 x i64> %zext, %zext
- %revshuf = shufflevector <4 x i64> %add, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
- ret <4 x i64> %revshuf
-}
-
-define <4 x i64> @not_fold_cast_mismatched_types(<4 x i32> %x) {
-; CHECK-LABEL: @not_fold_cast_mismatched_types(
-; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i32> [[SHUF]] to <2 x i64>
-; CHECK-NEXT: [[EXTSHUF:%.*]] = shufflevector <2 x i64> [[ZEXT]], <2 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-; CHECK-NEXT: ret <4 x i64> [[EXTSHUF]]
-;
- %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
- %zext = zext <2 x i32> %shuf to <2 x i64>
- %extshuf = shufflevector <2 x i64> %zext, <2 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
- ret <4 x i64> %extshuf
-}
-
-define <4 x float> @not_fold_binop_mismatched_types(<4 x float> %x, <4 x float> %y) {
-; CHECK-LABEL: @not_fold_binop_mismatched_types(
-; CHECK-NEXT: [[SHUF_X:%.*]] = shufflevector <4 x float> [[X:%.*]], <4 x float> poison, <2 x i32> <i32 0, i32 2>
-; CHECK-NEXT: [[SHUF_Y:%.*]] = shufflevector <4 x float> [[Y:%.*]], <4 x float> poison, <2 x i32> <i32 1, i32 3>
-; CHECK-NEXT: [[FADD:%.*]] = fadd fast <2 x float> [[SHUF_X]], [[SHUF_Y]]
-; CHECK-NEXT: [[EXTSHUF:%.*]] = shufflevector <2 x float> [[FADD]], <2 x float> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
-; CHECK-NEXT: ret <4 x float> [[EXTSHUF]]
-;
- %shuf.x = shufflevector <4 x float> %x, <4 x float> poison, <2 x i32> <i32 0, i32 2>
- %shuf.y = shufflevector <4 x float> %y, <4 x float> poison, <2 x i32> <i32 1, i32 3>
- %fadd = fadd fast <2 x float> %shuf.x, %shuf.y
- %extshuf = shufflevector <2 x float> %fadd, <2 x float> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
- ret <4 x float> %extshuf
-}
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
index 14b5ee244080..9d8d7036d4f4 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/conditional-branches-cost.ll
@@ -333,103 +333,9 @@ define void @invar_cond_chain_1(ptr %I, ptr noalias %src, i1 %c) {
; DEFAULT-LABEL: define void @invar_cond_chain_1(
; DEFAULT-SAME: ptr [[I:%.*]], ptr noalias [[SRC:%.*]], i1 [[C:%.*]]) {
; DEFAULT-NEXT: entry:
-; DEFAULT-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64
-; DEFAULT-NEXT: [[I1:%.*]] = ptrtoint ptr [[I]] to i64
-; DEFAULT-NEXT: [[TMP29:%.*]] = sub i64 [[I1]], [[SRC2]]
-; DEFAULT-NEXT: [[TMP0:%.*]] = lshr i64 [[TMP29]], 2
-; DEFAULT-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 1
-; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP1]], 8
-; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; DEFAULT: vector.ph:
-; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP1]], 8
-; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP1]], [[N_MOD_VF]]
-; DEFAULT-NEXT: [[TMP2:%.*]] = mul i64 [[N_VEC]], 4
-; DEFAULT-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP2]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i1> poison, i1 [[C]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i1> [[BROADCAST_SPLATINSERT]], <4 x i1> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
-; DEFAULT: vector.body:
-; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE17:%.*]] ]
-; DEFAULT-NEXT: [[OFFSET_IDX:%.*]] = mul i64 [[INDEX]], 4
-; DEFAULT-NEXT: [[TMP3:%.*]] = add i64 [[OFFSET_IDX]], 0
-; DEFAULT-NEXT: [[TMP4:%.*]] = add i64 [[OFFSET_IDX]], 16
-; DEFAULT-NEXT: [[NEXT_GEP:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]]
-; DEFAULT-NEXT: [[NEXT_GEP3:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP4]]
-; DEFAULT-NEXT: [[TMP5:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 0
-; DEFAULT-NEXT: [[TMP6:%.*]] = getelementptr i32, ptr [[NEXT_GEP]], i32 4
-; DEFAULT-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i32>, ptr [[TMP5]], align 4
-; DEFAULT-NEXT: [[WIDE_LOAD3:%.*]] = load <4 x i32>, ptr [[TMP6]], align 4
-; DEFAULT-NEXT: [[TMP7:%.*]] = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[BROADCAST_SPLAT]], <4 x i1> zeroinitializer
-; DEFAULT-NEXT: [[TMP8:%.*]] = select <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i1> [[BROADCAST_SPLAT]], <4 x i1> zeroinitializer
-; DEFAULT-NEXT: [[TMP9:%.*]] = or <4 x i1> [[TMP7]], zeroinitializer
-; DEFAULT-NEXT: [[TMP10:%.*]] = or <4 x i1> [[TMP8]], zeroinitializer
-; DEFAULT-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP9]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; DEFAULT: pred.store.if:
-; DEFAULT-NEXT: [[TMP12:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP12]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE]]
-; DEFAULT: pred.store.continue:
-; DEFAULT-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP9]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]]
-; DEFAULT: pred.store.if5:
-; DEFAULT-NEXT: [[TMP14:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 1
-; DEFAULT-NEXT: store i32 [[TMP14]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE5]]
-; DEFAULT: pred.store.continue6:
-; DEFAULT-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP9]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
-; DEFAULT: pred.store.if7:
-; DEFAULT-NEXT: [[TMP16:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 2
-; DEFAULT-NEXT: store i32 [[TMP16]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE7]]
-; DEFAULT: pred.store.continue8:
-; DEFAULT-NEXT: [[TMP17:%.*]] = extractelement <4 x i1> [[TMP9]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP17]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]]
-; DEFAULT: pred.store.if9:
-; DEFAULT-NEXT: [[TMP18:%.*]] = extractelement <4 x i32> [[WIDE_LOAD]], i32 3
-; DEFAULT-NEXT: store i32 [[TMP18]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE9]]
-; DEFAULT: pred.store.continue10:
-; DEFAULT-NEXT: [[TMP19:%.*]] = extractelement <4 x i1> [[TMP10]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP19]], label [[PRED_STORE_IF10:%.*]], label [[PRED_STORE_CONTINUE11:%.*]]
-; DEFAULT: pred.store.if11:
-; DEFAULT-NEXT: [[TMP20:%.*]] = extractelement <4 x i32> [[WIDE_LOAD3]], i32 0
-; DEFAULT-NEXT: store i32 [[TMP20]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE11]]
-; DEFAULT: pred.store.continue12:
-; DEFAULT-NEXT: [[TMP21:%.*]] = extractelement <4 x i1> [[TMP10]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP21]], label [[PRED_STORE_IF12:%.*]], label [[PRED_STORE_CONTINUE13:%.*]]
-; DEFAULT: pred.store.if13:
-; DEFAULT-NEXT: [[TMP22:%.*]] = extractelement <4 x i32> [[WIDE_LOAD3]], i32 1
-; DEFAULT-NEXT: store i32 [[TMP22]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE13]]
-; DEFAULT: pred.store.continue14:
-; DEFAULT-NEXT: [[TMP23:%.*]] = extractelement <4 x i1> [[TMP10]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP23]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15:%.*]]
-; DEFAULT: pred.store.if15:
-; DEFAULT-NEXT: [[TMP24:%.*]] = extractelement <4 x i32> [[WIDE_LOAD3]], i32 2
-; DEFAULT-NEXT: store i32 [[TMP24]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE15]]
-; DEFAULT: pred.store.continue16:
-; DEFAULT-NEXT: [[TMP25:%.*]] = extractelement <4 x i1> [[TMP10]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP25]], label [[PRED_STORE_IF16:%.*]], label [[PRED_STORE_CONTINUE17]]
-; DEFAULT: pred.store.if17:
-; DEFAULT-NEXT: [[TMP26:%.*]] = extractelement <4 x i32> [[WIDE_LOAD3]], i32 3
-; DEFAULT-NEXT: store i32 [[TMP26]], ptr [[I]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE17]]
-; DEFAULT: pred.store.continue18:
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; DEFAULT-NEXT: [[TMP27:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; DEFAULT-NEXT: br i1 [[TMP27]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
-; DEFAULT: middle.block:
-; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP1]], [[N_VEC]]
-; DEFAULT-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP312_LOOPEXIT:%.*]], label [[SCALAR_PH]]
-; DEFAULT: scalar.ph:
-; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[SRC]], [[ENTRY:%.*]] ]
; DEFAULT-NEXT: br label [[FOR_BODY313:%.*]]
; DEFAULT: loop.header:
-; DEFAULT-NEXT: [[__BEGIN3_011973:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
+; DEFAULT-NEXT: [[__BEGIN3_011973:%.*]] = phi ptr [ [[SRC]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
; DEFAULT-NEXT: [[TMP28:%.*]] = load i32, ptr [[__BEGIN3_011973]], align 4
; DEFAULT-NEXT: br i1 true, label [[IF_ELSE321:%.*]], label [[IF_THEN316:%.*]]
; DEFAULT: if:
@@ -444,7 +350,7 @@ define void @invar_cond_chain_1(ptr %I, ptr noalias %src, i1 %c) {
; DEFAULT: loop.latch:
; DEFAULT-NEXT: [[INCDEC_PTR329]] = getelementptr inbounds i8, ptr [[__BEGIN3_011973]], i64 4
; DEFAULT-NEXT: [[CMP311_NOT:%.*]] = icmp eq ptr [[__BEGIN3_011973]], [[I]]
-; DEFAULT-NEXT: br i1 [[CMP311_NOT]], label [[FOR_COND_CLEANUP312_LOOPEXIT]], label [[FOR_BODY313]], !llvm.loop [[LOOP7:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[CMP311_NOT]], label [[EXIT:%.*]], label [[FOR_BODY313]]
; DEFAULT: exit:
; DEFAULT-NEXT: ret void
;
@@ -506,86 +412,9 @@ define void @invar_cond_chain_2(ptr %I, ptr noalias %src, ptr noalias %dst, i32
; DEFAULT-LABEL: define void @invar_cond_chain_2(
; DEFAULT-SAME: ptr [[I:%.*]], ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[A:%.*]]) {
; DEFAULT-NEXT: entry:
-; DEFAULT-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64
-; DEFAULT-NEXT: [[I1:%.*]] = ptrtoint ptr [[I]] to i64
-; DEFAULT-NEXT: [[TMP0:%.*]] = sub i64 [[I1]], [[SRC2]]
-; DEFAULT-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 2
-; DEFAULT-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; DEFAULT-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i64 [[TMP2]], 8
-; DEFAULT-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; DEFAULT: vector.ph:
-; DEFAULT-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[TMP2]], 8
-; DEFAULT-NEXT: [[N_VEC:%.*]] = sub i64 [[TMP2]], [[N_MOD_VF]]
-; DEFAULT-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 4
-; DEFAULT-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]]
-; DEFAULT-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i64 0
-; DEFAULT-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT]], <4 x i32> poison, <4 x i32> zeroinitializer
-; DEFAULT-NEXT: br label [[VECTOR_BODY:%.*]]
-; DEFAULT: vector.body:
-; DEFAULT-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE15:%.*]] ]
-; DEFAULT-NEXT: [[TMP4:%.*]] = icmp sgt <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer
-; DEFAULT-NEXT: [[TMP5:%.*]] = icmp sgt <4 x i32> [[BROADCAST_SPLAT]], zeroinitializer
-; DEFAULT-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP4]], <i1 true, i1 true, i1 true, i1 true>
-; DEFAULT-NEXT: [[TMP7:%.*]] = xor <4 x i1> [[TMP5]], <i1 true, i1 true, i1 true, i1 true>
-; DEFAULT-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP6]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; DEFAULT: pred.store.if:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE]]
-; DEFAULT: pred.store.continue:
-; DEFAULT-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP6]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF2:%.*]], label [[PRED_STORE_CONTINUE3:%.*]]
-; DEFAULT: pred.store.if3:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE3]]
-; DEFAULT: pred.store.continue4:
-; DEFAULT-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP6]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF4:%.*]], label [[PRED_STORE_CONTINUE5:%.*]]
-; DEFAULT: pred.store.if5:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE5]]
-; DEFAULT: pred.store.continue6:
-; DEFAULT-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP6]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
-; DEFAULT: pred.store.if7:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE7]]
-; DEFAULT: pred.store.continue8:
-; DEFAULT-NEXT: [[TMP12:%.*]] = extractelement <4 x i1> [[TMP7]], i32 0
-; DEFAULT-NEXT: br i1 [[TMP12]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]]
-; DEFAULT: pred.store.if9:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE9]]
-; DEFAULT: pred.store.continue10:
-; DEFAULT-NEXT: [[TMP13:%.*]] = extractelement <4 x i1> [[TMP7]], i32 1
-; DEFAULT-NEXT: br i1 [[TMP13]], label [[PRED_STORE_IF10:%.*]], label [[PRED_STORE_CONTINUE11:%.*]]
-; DEFAULT: pred.store.if11:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE11]]
-; DEFAULT: pred.store.continue12:
-; DEFAULT-NEXT: [[TMP14:%.*]] = extractelement <4 x i1> [[TMP7]], i32 2
-; DEFAULT-NEXT: br i1 [[TMP14]], label [[PRED_STORE_IF12:%.*]], label [[PRED_STORE_CONTINUE13:%.*]]
-; DEFAULT: pred.store.if13:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE13]]
-; DEFAULT: pred.store.continue14:
-; DEFAULT-NEXT: [[TMP15:%.*]] = extractelement <4 x i1> [[TMP7]], i32 3
-; DEFAULT-NEXT: br i1 [[TMP15]], label [[PRED_STORE_IF14:%.*]], label [[PRED_STORE_CONTINUE15]]
-; DEFAULT: pred.store.if15:
-; DEFAULT-NEXT: store i32 0, ptr [[DST]], align 4
-; DEFAULT-NEXT: br label [[PRED_STORE_CONTINUE15]]
-; DEFAULT: pred.store.continue16:
-; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
-; DEFAULT-NEXT: [[TMP16:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; DEFAULT-NEXT: br i1 [[TMP16]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
-; DEFAULT: middle.block:
-; DEFAULT-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[TMP2]], [[N_VEC]]
-; DEFAULT-NEXT: br i1 [[CMP_N]], label [[FOR_COND_CLEANUP312_LOOPEXIT:%.*]], label [[SCALAR_PH]]
-; DEFAULT: scalar.ph:
-; DEFAULT-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[SRC]], [[ENTRY:%.*]] ]
; DEFAULT-NEXT: br label [[FOR_BODY313:%.*]]
; DEFAULT: loop.header:
-; DEFAULT-NEXT: [[__BEGIN3_01197:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
+; DEFAULT-NEXT: [[__BEGIN3_01197:%.*]] = phi ptr [ [[SRC]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
; DEFAULT-NEXT: [[CMP315_NOT:%.*]] = icmp sgt i32 [[A]], 0
; DEFAULT-NEXT: br i1 [[CMP315_NOT]], label [[IF_END327]], label [[IF_THEN316:%.*]]
; DEFAULT: if:
@@ -596,74 +425,16 @@ define void @invar_cond_chain_2(ptr %I, ptr noalias %src, ptr noalias %dst, i32
; DEFAULT: loop.latch:
; DEFAULT-NEXT: [[INCDEC_PTR329]] = getelementptr inbounds i8, ptr [[__BEGIN3_01197]], i64 4
; DEFAULT-NEXT: [[CMP311_NOT:%.*]] = icmp eq ptr [[__BEGIN3_01197]], [[I]]
-; DEFAULT-NEXT: br i1 [[CMP311_NOT]], label [[FOR_COND_CLEANUP312_LOOPEXIT]], label [[FOR_BODY313]], !llvm.loop [[LOOP9:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[CMP311_NOT]], label [[EXIT:%.*]], label [[FOR_BODY313]]
; DEFAULT: exit:
; DEFAULT-NEXT: ret void
;
; PRED-LABEL: define void @invar_cond_chain_2(
; PRED-SAME: ptr [[I:%.*]], ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]], i32 [[A:%.*]]) {
; PRED-NEXT: entry:
-; PRED-NEXT: [[SRC2:%.*]] = ptrtoint ptr [[SRC]] to i64
-; PRED-NEXT: [[I1:%.*]] = ptrtoint ptr [[I]] to i64
-; PRED-NEXT: [[TMP0:%.*]] = sub i64 [[I1]], [[SRC2]]
-; PRED-NEXT: [[TMP1:%.*]] = lshr i64 [[TMP0]], 2
-; PRED-NEXT: [[TMP2:%.*]] = add nuw nsw i64 [[TMP1]], 1
-; PRED-NEXT: br i1 false, label [[SCALAR_PH:%.*]], label [[VECTOR_PH:%.*]]
-; PRED: vector.ph:
-; PRED-NEXT: [[N_RND_UP:%.*]] = add i64 [[TMP2]], 3
-; PRED-NEXT: [[N_MOD_VF:%.*]] = urem i64 [[N_RND_UP]], 4
-; PRED-NEXT: [[N_VEC:%.*]] = sub i64 [[N_RND_UP]], [[N_MOD_VF]]
-; PRED-NEXT: [[TMP3:%.*]] = mul i64 [[N_VEC]], 4
-; PRED-NEXT: [[IND_END:%.*]] = getelementptr i8, ptr [[SRC]], i64 [[TMP3]]
-; PRED-NEXT: [[TRIP_COUNT_MINUS_1:%.*]] = sub i64 [[TMP2]], 1
-; PRED-NEXT: [[BROADCAST_SPLATINSERT2:%.*]] = insertelement <4 x i64> poison, i64 [[TRIP_COUNT_MINUS_1]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT3:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT2]], <4 x i64> poison, <4 x i32> zeroinitializer
-; PRED-NEXT: [[BROADCAST_SPLATINSERT4:%.*]] = insertelement <4 x i32> poison, i32 [[A]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT5:%.*]] = shufflevector <4 x i32> [[BROADCAST_SPLATINSERT4]], <4 x i32> poison, <4 x i32> zeroinitializer
-; PRED-NEXT: br label [[VECTOR_BODY:%.*]]
-; PRED: vector.body:
-; PRED-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[PRED_STORE_CONTINUE11:%.*]] ]
-; PRED-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[INDEX]], i64 0
-; PRED-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <4 x i64> [[BROADCAST_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
-; PRED-NEXT: [[VEC_IV:%.*]] = add <4 x i64> [[BROADCAST_SPLAT]], <i64 0, i64 1, i64 2, i64 3>
-; PRED-NEXT: [[TMP4:%.*]] = icmp ule <4 x i64> [[VEC_IV]], [[BROADCAST_SPLAT3]]
-; PRED-NEXT: [[TMP5:%.*]] = icmp sgt <4 x i32> [[BROADCAST_SPLAT5]], zeroinitializer
-; PRED-NEXT: [[TMP6:%.*]] = xor <4 x i1> [[TMP5]], <i1 true, i1 true, i1 true, i1 true>
-; PRED-NEXT: [[TMP7:%.*]] = select <4 x i1> [[TMP4]], <4 x i1> [[TMP6]], <4 x i1> zeroinitializer
-; PRED-NEXT: [[TMP8:%.*]] = extractelement <4 x i1> [[TMP7]], i32 0
-; PRED-NEXT: br i1 [[TMP8]], label [[PRED_STORE_IF:%.*]], label [[PRED_STORE_CONTINUE:%.*]]
-; PRED: pred.store.if:
-; PRED-NEXT: store i32 0, ptr [[DST]], align 4
-; PRED-NEXT: br label [[PRED_STORE_CONTINUE]]
-; PRED: pred.store.continue:
-; PRED-NEXT: [[TMP9:%.*]] = extractelement <4 x i1> [[TMP7]], i32 1
-; PRED-NEXT: br i1 [[TMP9]], label [[PRED_STORE_IF6:%.*]], label [[PRED_STORE_CONTINUE7:%.*]]
-; PRED: pred.store.if7:
-; PRED-NEXT: store i32 0, ptr [[DST]], align 4
-; PRED-NEXT: br label [[PRED_STORE_CONTINUE7]]
-; PRED: pred.store.continue8:
-; PRED-NEXT: [[TMP10:%.*]] = extractelement <4 x i1> [[TMP7]], i32 2
-; PRED-NEXT: br i1 [[TMP10]], label [[PRED_STORE_IF8:%.*]], label [[PRED_STORE_CONTINUE9:%.*]]
-; PRED: pred.store.if9:
-; PRED-NEXT: store i32 0, ptr [[DST]], align 4
-; PRED-NEXT: br label [[PRED_STORE_CONTINUE9]]
-; PRED: pred.store.continue10:
-; PRED-NEXT: [[TMP11:%.*]] = extractelement <4 x i1> [[TMP7]], i32 3
-; PRED-NEXT: br i1 [[TMP11]], label [[PRED_STORE_IF10:%.*]], label [[PRED_STORE_CONTINUE11]]
-; PRED: pred.store.if11:
-; PRED-NEXT: store i32 0, ptr [[DST]], align 4
-; PRED-NEXT: br label [[PRED_STORE_CONTINUE11]]
-; PRED: pred.store.continue12:
-; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 4
-; PRED-NEXT: [[TMP12:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
-; PRED-NEXT: br i1 [[TMP12]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
-; PRED: middle.block:
-; PRED-NEXT: br i1 true, label [[FOR_COND_CLEANUP312_LOOPEXIT:%.*]], label [[SCALAR_PH]]
-; PRED: scalar.ph:
-; PRED-NEXT: [[BC_RESUME_VAL:%.*]] = phi ptr [ [[IND_END]], [[MIDDLE_BLOCK]] ], [ [[SRC]], [[ENTRY:%.*]] ]
; PRED-NEXT: br label [[FOR_BODY313:%.*]]
; PRED: loop.header:
-; PRED-NEXT: [[__BEGIN3_01197:%.*]] = phi ptr [ [[BC_RESUME_VAL]], [[SCALAR_PH]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
+; PRED-NEXT: [[__BEGIN3_01197:%.*]] = phi ptr [ [[SRC]], [[ENTRY:%.*]] ], [ [[INCDEC_PTR329:%.*]], [[IF_END327:%.*]] ]
; PRED-NEXT: [[CMP315_NOT:%.*]] = icmp sgt i32 [[A]], 0
; PRED-NEXT: br i1 [[CMP315_NOT]], label [[IF_END327]], label [[IF_THEN316:%.*]]
; PRED: if:
@@ -674,7 +445,7 @@ define void @invar_cond_chain_2(ptr %I, ptr noalias %src, ptr noalias %dst, i32
; PRED: loop.latch:
; PRED-NEXT: [[INCDEC_PTR329]] = getelementptr inbounds i8, ptr [[__BEGIN3_01197]], i64 4
; PRED-NEXT: [[CMP311_NOT:%.*]] = icmp eq ptr [[__BEGIN3_01197]], [[I]]
-; PRED-NEXT: br i1 [[CMP311_NOT]], label [[FOR_COND_CLEANUP312_LOOPEXIT]], label [[FOR_BODY313]], !llvm.loop [[LOOP5:![0-9]+]]
+; PRED-NEXT: br i1 [[CMP311_NOT]], label [[EXIT:%.*]], label [[FOR_BODY313]]
; PRED: exit:
; PRED-NEXT: ret void
;
@@ -723,7 +494,7 @@ define void @latch_branch_cost(ptr %dst) {
; DEFAULT-NEXT: store <16 x i8> zeroinitializer, ptr [[TMP5]], align 1
; DEFAULT-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 32
; DEFAULT-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 96
-; DEFAULT-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP10:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
; DEFAULT: middle.block:
; DEFAULT-NEXT: br i1 false, label [[FOR_END:%.*]], label [[VEC_EPILOG_ITER_CHECK:%.*]]
; DEFAULT: vec.epilog.iter.check:
@@ -739,7 +510,7 @@ define void @latch_branch_cost(ptr %dst) {
; DEFAULT-NEXT: store <4 x i8> zeroinitializer, ptr [[TMP9]], align 1
; DEFAULT-NEXT: [[INDEX_NEXT2]] = add nuw i64 [[INDEX1]], 4
; DEFAULT-NEXT: [[TMP10:%.*]] = icmp eq i64 [[INDEX_NEXT2]], 100
-; DEFAULT-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP11:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[TMP10]], label [[VEC_EPILOG_MIDDLE_BLOCK:%.*]], label [[VEC_EPILOG_VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
; DEFAULT: vec.epilog.middle.block:
; DEFAULT-NEXT: br i1 true, label [[FOR_END]], label [[SCALAR_PH]]
; DEFAULT: vec.epilog.scalar.ph:
@@ -751,7 +522,7 @@ define void @latch_branch_cost(ptr %dst) {
; DEFAULT-NEXT: store i8 0, ptr [[ARRAYIDX]], align 1
; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[INDVARS_IV]], 1
; DEFAULT-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100
-; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP12:![0-9]+]]
+; DEFAULT-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP8:![0-9]+]]
; DEFAULT: exit:
; DEFAULT-NEXT: ret void
;
@@ -832,7 +603,7 @@ define void @latch_branch_cost(ptr %dst) {
; PRED-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], 8
; PRED-NEXT: [[VEC_IND_NEXT]] = add <8 x i64> [[VEC_IND]], <i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8, i64 8>
; PRED-NEXT: [[TMP25:%.*]] = icmp eq i64 [[INDEX_NEXT]], 104
-; PRED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP6:![0-9]+]]
+; PRED-NEXT: br i1 [[TMP25]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP4:![0-9]+]]
; PRED: middle.block:
; PRED-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; PRED: scalar.ph:
@@ -844,7 +615,7 @@ define void @latch_branch_cost(ptr %dst) {
; PRED-NEXT: store i8 0, ptr [[GEP]], align 1
; PRED-NEXT: [[INDVARS_IV_NEXT]] = add i64 [[IV]], 1
; PRED-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], 100
-; PRED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; PRED-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_END]], label [[FOR_BODY]], !llvm.loop [[LOOP5:![0-9]+]]
; PRED: exit:
; PRED-NEXT: ret void
;
@@ -871,12 +642,8 @@ exit:
; DEFAULT: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; DEFAULT: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
; DEFAULT: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; DEFAULT: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
-; DEFAULT: [[LOOP8]] = distinct !{[[LOOP8]], [[META1]], [[META2]]}
-; DEFAULT: [[LOOP9]] = distinct !{[[LOOP9]], [[META2]], [[META1]]}
-; DEFAULT: [[LOOP10]] = distinct !{[[LOOP10]], [[META1]], [[META2]]}
-; DEFAULT: [[LOOP11]] = distinct !{[[LOOP11]], [[META1]], [[META2]]}
-; DEFAULT: [[LOOP12]] = distinct !{[[LOOP12]], [[META2]], [[META1]]}
+; DEFAULT: [[LOOP7]] = distinct !{[[LOOP7]], [[META1]], [[META2]]}
+; DEFAULT: [[LOOP8]] = distinct !{[[LOOP8]], [[META2]], [[META1]]}
;.
; PRED: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]}
; PRED: [[META1]] = !{!"llvm.loop.isvectorized", i32 1}
@@ -884,6 +651,4 @@ exit:
; PRED: [[LOOP3]] = distinct !{[[LOOP3]], [[META2]], [[META1]]}
; PRED: [[LOOP4]] = distinct !{[[LOOP4]], [[META1]], [[META2]]}
; PRED: [[LOOP5]] = distinct !{[[LOOP5]], [[META2]], [[META1]]}
-; PRED: [[LOOP6]] = distinct !{[[LOOP6]], [[META1]], [[META2]]}
-; PRED: [[LOOP7]] = distinct !{[[LOOP7]], [[META2]], [[META1]]}
;.
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
index d335ac4b6970..200c2adcf0e6 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/masked-call.ll
@@ -402,10 +402,9 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFCOMMON-NEXT: [[TMP11:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> zeroinitializer, <vscale x 2 x i1> [[TMP10]])
; TFCOMMON-NEXT: [[TMP12:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK]], <vscale x 2 x i1> [[TMP8]], <vscale x 2 x i1> zeroinitializer
; TFCOMMON-NEXT: [[TMP13:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[TMP12]])
-; TFCOMMON-NEXT: [[TMP14:%.*]] = or <vscale x 2 x i1> [[TMP10]], [[TMP12]]
; TFCOMMON-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP10]], <vscale x 2 x i64> [[TMP11]], <vscale x 2 x i64> [[TMP13]]
; TFCOMMON-NEXT: [[TMP15:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
-; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[TMP14]])
+; TFCOMMON-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP15]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
; TFCOMMON-NEXT: [[INDEX_NEXT]] = add i64 [[INDEX]], [[TMP6]]
; TFCOMMON-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 2 x i1> @llvm.get.active.lane.mask.nxv2i1.i64(i64 [[INDEX_NEXT]], i64 1025)
; TFCOMMON-NEXT: [[TMP16:%.*]] = xor <vscale x 2 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i64 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer)
@@ -453,16 +452,14 @@ define void @test_widen_if_then_else(ptr noalias %a, ptr readnone %b) #4 {
; TFA_INTERLEAVE-NEXT: [[TMP22:%.*]] = select <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]], <vscale x 2 x i1> [[TMP14]], <vscale x 2 x i1> zeroinitializer
; TFA_INTERLEAVE-NEXT: [[TMP23:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD]], <vscale x 2 x i1> [[TMP21]])
; TFA_INTERLEAVE-NEXT: [[TMP24:%.*]] = call <vscale x 2 x i64> @foo_vector(<vscale x 2 x i64> [[WIDE_MASKED_LOAD3]], <vscale x 2 x i1> [[TMP22]])
-; TFA_INTERLEAVE-NEXT: [[TMP25:%.*]] = or <vscale x 2 x i1> [[TMP17]], [[TMP21]]
-; TFA_INTERLEAVE-NEXT: [[TMP26:%.*]] = or <vscale x 2 x i1> [[TMP18]], [[TMP22]]
; TFA_INTERLEAVE-NEXT: [[PREDPHI:%.*]] = select <vscale x 2 x i1> [[TMP17]], <vscale x 2 x i64> [[TMP19]], <vscale x 2 x i64> [[TMP23]]
; TFA_INTERLEAVE-NEXT: [[PREDPHI4:%.*]] = select <vscale x 2 x i1> [[TMP18]], <vscale x 2 x i64> [[TMP20]], <vscale x 2 x i64> [[TMP24]]
; TFA_INTERLEAVE-NEXT: [[TMP27:%.*]] = getelementptr inbounds i64, ptr [[B:%.*]], i64 [[INDEX]]
; TFA_INTERLEAVE-NEXT: [[TMP28:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP29:%.*]] = mul i64 [[TMP28]], 2
; TFA_INTERLEAVE-NEXT: [[TMP30:%.*]] = getelementptr inbounds i64, ptr [[TMP27]], i64 [[TMP29]]
-; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP27]], i32 8, <vscale x 2 x i1> [[TMP25]])
-; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP30]], i32 8, <vscale x 2 x i1> [[TMP26]])
+; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI]], ptr [[TMP27]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK]])
+; TFA_INTERLEAVE-NEXT: call void @llvm.masked.store.nxv2i64.p0(<vscale x 2 x i64> [[PREDPHI4]], ptr [[TMP30]], i32 8, <vscale x 2 x i1> [[ACTIVE_LANE_MASK2]])
; TFA_INTERLEAVE-NEXT: [[INDEX_NEXT:%.*]] = add i64 [[INDEX]], [[TMP6]]
; TFA_INTERLEAVE-NEXT: [[TMP31:%.*]] = call i64 @llvm.vscale.i64()
; TFA_INTERLEAVE-NEXT: [[TMP32:%.*]] = mul i64 [[TMP31]], 2
diff --git a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
index 2b2742ca7ccb..63ad98b2d8ab 100644
--- a/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
+++ b/llvm/test/Transforms/LoopVectorize/AArch64/sve-tail-folding.ll
@@ -480,11 +480,10 @@ define void @cond_uniform_load(ptr noalias %dst, ptr noalias readonly %src, ptr
; CHECK-NEXT: [[TMP15:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP14]], <vscale x 4 x i1> zeroinitializer
; CHECK-NEXT: [[WIDE_MASKED_GATHER:%.*]] = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> [[BROADCAST_SPLAT]], i32 4, <vscale x 4 x i1> [[TMP15]], <vscale x 4 x i32> poison)
; CHECK-NEXT: [[TMP16:%.*]] = select <vscale x 4 x i1> [[ACTIVE_LANE_MASK]], <vscale x 4 x i1> [[TMP13]], <vscale x 4 x i1> zeroinitializer
-; CHECK-NEXT: [[TMP18:%.*]] = or <vscale x 4 x i1> [[TMP15]], [[TMP16]]
; CHECK-NEXT: [[PREDPHI:%.*]] = select <vscale x 4 x i1> [[TMP16]], <vscale x 4 x i32> zeroinitializer, <vscale x 4 x i32> [[WIDE_MASKED_GATHER]]
; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds i32, ptr [[DST:%.*]], i64 [[TMP10]]
; CHECK-NEXT: [[TMP19:%.*]] = getelementptr inbounds i32, ptr [[TMP17]], i32 0
-; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[PREDPHI]], ptr [[TMP19]], i32 4, <vscale x 4 x i1> [[TMP18]])
+; CHECK-NEXT: call void @llvm.masked.store.nxv4i32.p0(<vscale x 4 x i32> [[PREDPHI]], ptr [[TMP19]], i32 4, <vscale x 4 x i1> [[ACTIVE_LANE_MASK]])
; CHECK-NEXT: [[INDEX_NEXT2]] = add i64 [[INDEX1]], [[TMP21]]
; CHECK-NEXT: [[ACTIVE_LANE_MASK_NEXT]] = call <vscale x 4 x i1> @llvm.get.active.lane.mask.nxv4i1.i64(i64 [[INDEX1]], i64 [[TMP9]])
; CHECK-NEXT: [[TMP22:%.*]] = xor <vscale x 4 x i1> [[ACTIVE_LANE_MASK_NEXT]], shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i64 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer)
diff --git a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll
index 7172f0907e77..28c1eef84e25 100644
--- a/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll
+++ b/llvm/test/Transforms/LoopVectorize/LoongArch/defaults.ll
@@ -22,14 +22,20 @@ define void @vector_add(ptr noalias nocapture %a, i64 %v) {
; CHECK: vector.body:
; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT: [[TMP0:%.*]] = add i64 [[INDEX]], 0
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP0]]
-; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[TMP1]], i32 0
-; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP2]], align 8
-; CHECK-NEXT: [[TMP3:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
-; CHECK-NEXT: store <4 x i64> [[TMP3]], ptr [[TMP2]], align 8
-; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
-; CHECK-NEXT: [[TMP4:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
-; CHECK-NEXT: br i1 [[TMP4]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
+; CHECK-NEXT: [[TMP1:%.*]] = add i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP0]]
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds i64, ptr [[A]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 0
+; CHECK-NEXT: [[TMP5:%.*]] = getelementptr inbounds i64, ptr [[TMP2]], i32 4
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <4 x i64>, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[WIDE_LOAD1:%.*]] = load <4 x i64>, ptr [[TMP5]], align 8
+; CHECK-NEXT: [[TMP6:%.*]] = add <4 x i64> [[WIDE_LOAD]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: [[TMP7:%.*]] = add <4 x i64> [[WIDE_LOAD1]], [[BROADCAST_SPLAT]]
+; CHECK-NEXT: store <4 x i64> [[TMP6]], ptr [[TMP4]], align 8
+; CHECK-NEXT: store <4 x i64> [[TMP7]], ptr [[TMP5]], align 8
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 8
+; CHECK-NEXT: [[TMP8:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
+; CHECK-NEXT: br i1 [[TMP8]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]]
; CHECK: middle.block:
; CHECK-NEXT: br i1 true, label [[FOR_END:%.*]], label [[SCALAR_PH]]
; CHECK: scalar.ph:
diff --git a/llvm/test/Transforms/LoopVectorize/LoongArch/loongarch-interleaved.ll b/llvm/test/Transforms/LoopVectorize/LoongArch/loongarch-interleaved.ll
new file mode 100644
index 000000000000..be9b170491b9
--- /dev/null
+++ b/llvm/test/Transforms/LoopVectorize/LoongArch/loongarch-interleaved.ll
@@ -0,0 +1,39 @@
+; REQUIRES: asserts
+; RUN: opt --passes=loop-vectorize,dce,instcombine --mtriple loongarch64 \
+; RUN: -S < %s 2>&1 | FileCheck %s
+
+; CHECK-LABEL: foo
+; CHECK: %{{.*}} = add {{.*}}, 2
+
+; Function Attrs: nofree norecurse nosync nounwind writeonly
+define dso_local void @foo(i32 signext %n, ptr nocapture %A) local_unnamed_addr #0 {
+entry:
+ %cmp5 = icmp sgt i32 %n, 0
+ br i1 %cmp5, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader: ; preds = %entry
+ %wide.trip.count = zext i32 %n to i64
+ br label %for.body
+
+for.cond.cleanup.loopexit: ; preds = %for.body
+ br label %for.cond.cleanup
+
+for.cond.cleanup: ; preds = %for.cond.cleanup.loopexit, %entry
+ ret void
+
+for.body: ; preds = %for.body.preheader, %for.body
+ %indvars.iv = phi i64 [ 0, %for.body.preheader ], [ %indvars.iv.next, %for.body ]
+ %arrayidx = getelementptr inbounds i32, ptr %A, i64 %indvars.iv
+ %0 = trunc i64 %indvars.iv to i32
+ store i32 %0, ptr %arrayidx, align 4, !tbaa !4
+ %indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
+ %exitcond.not = icmp eq i64 %indvars.iv.next, %wide.trip.count
+ br i1 %exitcond.not, label %for.cond.cleanup.loopexit, label %for.body, !llvm.loop !8
+}
+
+!4 = !{!5, !5, i64 0}
+!5 = !{!"int", !6, i64 0}
+!6 = !{!"omnipotent char", !7, i64 0}
+!7 = !{!"Simple C/C++ TBAA"}
+!8 = distinct !{!8, !9}
+!9 = !{!"llvm.loop.mustprogress"}
diff --git a/llvm/test/Transforms/NaryReassociate/preserving-debugloc-add-mul.ll b/llvm/test/Transforms/NaryReassociate/preserving-debugloc-add-mul.ll
new file mode 100644
index 000000000000..cc66d0cd3710
--- /dev/null
+++ b/llvm/test/Transforms/NaryReassociate/preserving-debugloc-add-mul.ll
@@ -0,0 +1,69 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
+; RUN: opt < %s -passes=nary-reassociate -S | FileCheck %s
+
+; Test that NaryReassociate's tryReassociatedBinaryOp() propagates the
+; debug location to new `add` and `mul` from the original binary operator
+; they replaced (`%3` in both `@add_reassociate` and `@mul_reassociate`).
+
+define void @add_reassociate(i32 %a, i32 %b, i32 %c) !dbg !5 {
+; CHECK-LABEL: define void @add_reassociate(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) !dbg [[DBG5:![0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = add i32 [[A]], [[C]]
+; CHECK-NEXT: call void @foo(i32 [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = add i32 [[TMP1]], [[B]], !dbg [[DBG8:![0-9]+]]
+; CHECK-NEXT: call void @foo(i32 [[TMP2]])
+; CHECK-NEXT: ret void
+;
+ %1 = add i32 %a, %c
+ call void @foo(i32 %1)
+ %2 = add i32 %b, %c
+ %3 = add i32 %a, %2, !dbg !11
+ call void @foo(i32 %3)
+ ret void
+}
+
+define void @mul_reassociate(i32 %a, i32 %b, i32 %c) !dbg !14 {
+; CHECK-LABEL: define void @mul_reassociate(
+; CHECK-SAME: i32 [[A:%.*]], i32 [[B:%.*]], i32 [[C:%.*]]) !dbg [[DBG9:![0-9]+]] {
+; CHECK-NEXT: [[TMP1:%.*]] = mul i32 [[A]], [[C]]
+; CHECK-NEXT: call void @foo(i32 [[TMP1]])
+; CHECK-NEXT: [[TMP2:%.*]] = mul i32 [[TMP1]], [[B]], !dbg [[DBG10:![0-9]+]]
+; CHECK-NEXT: call void @foo(i32 [[TMP2]])
+; CHECK-NEXT: ret void
+;
+ %1 = mul i32 %a, %c
+ call void @foo(i32 %1)
+ %2 = mul i32 %a, %b
+ %3 = mul i32 %2, %c, !dbg !18
+ call void @foo(i32 %3)
+ ret void
+}
+
+declare void @foo(i32)
+
+!llvm.dbg.cu = !{!0}
+!llvm.debugify = !{!2, !3}
+!llvm.module.flags = !{!4}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C, file: !1, producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+!1 = !DIFile(filename: "test.ll", directory: "/")
+!2 = !{i32 12}
+!3 = !{i32 0}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = distinct !DISubprogram(name: "add_reassociate", linkageName: "add_reassociate", scope: null, file: !1, line: 1, type: !6, scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!6 = !DISubroutineType(types: !7)
+!7 = !{}
+!11 = !DILocation(line: 4, column: 1, scope: !5)
+!14 = distinct !DISubprogram(name: "mul_reassociate", linkageName: "mul_reassociate", scope: null, file: !1, line: 7, type: !6, scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !0)
+!18 = !DILocation(line: 10, column: 1, scope: !14)
+
+;.
+; CHECK: [[META0:![0-9]+]] = distinct !DICompileUnit(language: DW_LANG_C, file: [[META1:![0-9]+]], producer: "debugify", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug)
+; CHECK: [[META1]] = !DIFile(filename: "test.ll", directory: {{.*}})
+; CHECK: [[DBG5]] = distinct !DISubprogram(name: "add_reassociate", linkageName: "add_reassociate", scope: null, file: [[META1]], line: 1, type: [[META6:![0-9]+]], scopeLine: 1, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]])
+; CHECK: [[META6]] = !DISubroutineType(types: [[META7:![0-9]+]])
+; CHECK: [[META7]] = !{}
+; CHECK: [[DBG8]] = !DILocation(line: 4, column: 1, scope: [[DBG5]])
+; CHECK: [[DBG9]] = distinct !DISubprogram(name: "mul_reassociate", linkageName: "mul_reassociate", scope: null, file: [[META1]], line: 7, type: [[META6]], scopeLine: 7, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: [[META0]])
+; CHECK: [[DBG10]] = !DILocation(line: 10, column: 1, scope: [[DBG9]])
+;.
diff --git a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
index d048b0bab417..5cbf50e06fbe 100644
--- a/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
+++ b/llvm/test/Transforms/PhaseOrdering/AArch64/matrix-extract-insert.ll
@@ -85,93 +85,316 @@ define void @matrix_extract_insert_loop(i32 %i, ptr nonnull align 8 dereferencea
; CHECK-NEXT: entry:
; CHECK-NEXT: [[CMP210_NOT:%.*]] = icmp eq i32 [[I:%.*]], 0
; CHECK-NEXT: [[CONV6:%.*]] = zext i32 [[I]] to i64
-; CHECK-NEXT: br i1 [[CMP210_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US:%.*]]
-; CHECK: for.cond1.preheader.us:
-; CHECK-NEXT: [[TMP0:%.*]] = icmp ult i32 [[I]], 225
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP0]])
-; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds <225 x double>, ptr [[B:%.*]], i64 0, i64 [[CONV6]]
+; CHECK-NEXT: br i1 [[CMP210_NOT]], label [[FOR_COND_CLEANUP:%.*]], label [[FOR_COND1_PREHEADER_US_PREHEADER:%.*]]
+; CHECK: for.cond1.preheader.us.preheader:
+; CHECK-NEXT: [[TMP0:%.*]] = shl nuw nsw i64 [[CONV6]], 3
+; CHECK-NEXT: [[TMP1:%.*]] = add nuw nsw i64 [[TMP0]], 360
+; CHECK-NEXT: [[SCEVGEP:%.*]] = getelementptr i8, ptr [[B:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[SCEVGEP20:%.*]] = getelementptr i8, ptr [[A:%.*]], i64 [[TMP1]]
+; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i32 [[I]], 225
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP2]])
+; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[CONV6]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK:%.*]] = icmp ult i32 [[I]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK]], label [[FOR_BODY4_US_PREHEADER:%.*]], label [[VECTOR_MEMCHECK:%.*]]
+; CHECK: vector.memcheck:
+; CHECK-NEXT: [[BOUND0:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
+; CHECK-NEXT: [[BOUND1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[FOUND_CONFLICT:%.*]] = and i1 [[BOUND0]], [[BOUND1]]
+; CHECK-NEXT: br i1 [[FOUND_CONFLICT]], label [[FOR_BODY4_US_PREHEADER]], label [[VECTOR_PH:%.*]]
+; CHECK: vector.ph:
+; CHECK-NEXT: [[N_VEC:%.*]] = and i64 [[CONV6]], 252
+; CHECK-NEXT: br label [[VECTOR_BODY:%.*]]
+; CHECK: vector.body:
+; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, [[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
+; CHECK-NEXT: [[TMP4:%.*]] = or disjoint i64 [[INDEX]], 1
+; CHECK-NEXT: [[TMP5:%.*]] = or disjoint i64 [[INDEX]], 2
+; CHECK-NEXT: [[TMP6:%.*]] = or disjoint i64 [[INDEX]], 3
+; CHECK-NEXT: [[TMP7:%.*]] = insertelement <2 x i64> poison, i64 [[INDEX]], i64 0
+; CHECK-NEXT: [[TMP8:%.*]] = insertelement <2 x i64> [[TMP7]], i64 [[TMP4]], i64 1
+; CHECK-NEXT: [[TMP9:%.*]] = insertelement <2 x i64> poison, i64 [[TMP5]], i64 0
+; CHECK-NEXT: [[TMP10:%.*]] = insertelement <2 x i64> [[TMP9]], i64 [[TMP6]], i64 1
+; CHECK-NEXT: [[TMP11:%.*]] = icmp ult <2 x i64> [[TMP8]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP12:%.*]] = icmp ult <2 x i64> [[TMP10]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP13:%.*]] = extractelement <2 x i1> [[TMP11]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP13]])
+; CHECK-NEXT: [[TMP14:%.*]] = extractelement <2 x i1> [[TMP11]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP14]])
+; CHECK-NEXT: [[TMP15:%.*]] = extractelement <2 x i1> [[TMP12]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP15]])
+; CHECK-NEXT: [[TMP16:%.*]] = extractelement <2 x i1> [[TMP12]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP16]])
+; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds i8, ptr [[TMP17]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD:%.*]] = load <2 x double>, ptr [[TMP17]], align 8, !alias.scope [[META0:![0-9]+]]
+; CHECK-NEXT: [[WIDE_LOAD21:%.*]] = load <2 x double>, ptr [[TMP18]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP19:%.*]] = load double, ptr [[TMP3]], align 8, !alias.scope [[META3:![0-9]+]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT22:%.*]] = insertelement <2 x double> poison, double [[TMP19]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT23:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP20:%.*]] = fmul <2 x double> [[WIDE_LOAD]], [[BROADCAST_SPLAT23]]
+; CHECK-NEXT: [[TMP21:%.*]] = fmul <2 x double> [[WIDE_LOAD21]], [[BROADCAST_SPLAT23]]
+; CHECK-NEXT: [[TMP22:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[INDEX]]
+; CHECK-NEXT: [[TMP23:%.*]] = getelementptr inbounds i8, ptr [[TMP22]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD24:%.*]] = load <2 x double>, ptr [[TMP22]], align 8, !alias.scope [[META5:![0-9]+]], !noalias [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD25:%.*]] = load <2 x double>, ptr [[TMP23]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP24:%.*]] = fsub <2 x double> [[WIDE_LOAD24]], [[TMP20]]
+; CHECK-NEXT: [[TMP25:%.*]] = fsub <2 x double> [[WIDE_LOAD25]], [[TMP21]]
+; CHECK-NEXT: store <2 x double> [[TMP24]], ptr [[TMP22]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: store <2 x double> [[TMP25]], ptr [[TMP23]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 4
+; CHECK-NEXT: [[TMP26:%.*]] = icmp eq i64 [[INDEX_NEXT]], [[N_VEC]]
+; CHECK-NEXT: br i1 [[TMP26]], label [[MIDDLE_BLOCK:%.*]], label [[VECTOR_BODY]], !llvm.loop [[LOOP7:![0-9]+]]
+; CHECK: middle.block:
+; CHECK-NEXT: [[CMP_N:%.*]] = icmp eq i64 [[N_VEC]], [[CONV6]]
+; CHECK-NEXT: br i1 [[CMP_N]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US:%.*]], label [[FOR_BODY4_US_PREHEADER]]
+; CHECK: for.body4.us.preheader:
+; CHECK-NEXT: [[INDVARS_IV_PH:%.*]] = phi i64 [ 0, [[VECTOR_MEMCHECK]] ], [ 0, [[FOR_COND1_PREHEADER_US_PREHEADER]] ], [ [[N_VEC]], [[MIDDLE_BLOCK]] ]
; CHECK-NEXT: br label [[FOR_BODY4_US:%.*]]
; CHECK: for.body4.us:
-; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_COND1_PREHEADER_US]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY4_US]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = icmp ult i64 [[INDVARS_IV]], 225
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP2]])
-; CHECK-NEXT: [[TMP3:%.*]] = getelementptr inbounds <225 x double>, ptr [[A:%.*]], i64 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[MATRIXEXT_US:%.*]] = load double, ptr [[TMP3]], align 8
-; CHECK-NEXT: [[MATRIXEXT8_US:%.*]] = load double, ptr [[TMP1]], align 8
+; CHECK-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY4_US]] ], [ [[INDVARS_IV_PH]], [[FOR_BODY4_US_PREHEADER]] ]
+; CHECK-NEXT: [[TMP27:%.*]] = icmp ult i64 [[INDVARS_IV]], 225
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP27]])
+; CHECK-NEXT: [[TMP28:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[MATRIXEXT_US:%.*]] = load double, ptr [[TMP28]], align 8
+; CHECK-NEXT: [[MATRIXEXT8_US:%.*]] = load double, ptr [[TMP3]], align 8
; CHECK-NEXT: [[MUL_US:%.*]] = fmul double [[MATRIXEXT_US]], [[MATRIXEXT8_US]]
-; CHECK-NEXT: [[TMP4:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[INDVARS_IV]]
-; CHECK-NEXT: [[MATRIXEXT11_US:%.*]] = load double, ptr [[TMP4]], align 8
+; CHECK-NEXT: [[TMP29:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[INDVARS_IV]]
+; CHECK-NEXT: [[MATRIXEXT11_US:%.*]] = load double, ptr [[TMP29]], align 8
; CHECK-NEXT: [[SUB_US:%.*]] = fsub double [[MATRIXEXT11_US]], [[MUL_US]]
-; CHECK-NEXT: store double [[SUB_US]], ptr [[TMP4]], align 8
+; CHECK-NEXT: store double [[SUB_US]], ptr [[TMP29]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
; CHECK-NEXT: [[EXITCOND_NOT:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[CONV6]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US:%.*]], label [[FOR_BODY4_US]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]], label [[FOR_BODY4_US]], !llvm.loop [[LOOP10:![0-9]+]]
; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us:
-; CHECK-NEXT: [[TMP5:%.*]] = add nuw nsw i64 [[CONV6]], 15
-; CHECK-NEXT: [[TMP6:%.*]] = icmp ult i32 [[I]], 210
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP6]])
-; CHECK-NEXT: [[TMP7:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP5]]
+; CHECK-NEXT: [[TMP30:%.*]] = add nuw nsw i64 [[CONV6]], 15
+; CHECK-NEXT: [[TMP31:%.*]] = icmp ult i32 [[I]], 210
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP31]])
+; CHECK-NEXT: [[TMP32:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP30]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK_1:%.*]] = icmp ult i32 [[I]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_1]], label [[FOR_BODY4_US_PREHEADER_1:%.*]], label [[VECTOR_MEMCHECK_1:%.*]]
+; CHECK: vector.memcheck.1:
+; CHECK-NEXT: [[BOUND0_1:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
+; CHECK-NEXT: [[BOUND1_1:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[FOUND_CONFLICT_1:%.*]] = and i1 [[BOUND0_1]], [[BOUND1_1]]
+; CHECK-NEXT: br i1 [[FOUND_CONFLICT_1]], label [[FOR_BODY4_US_PREHEADER_1]], label [[VECTOR_PH_1:%.*]]
+; CHECK: vector.ph.1:
+; CHECK-NEXT: [[N_VEC_1:%.*]] = and i64 [[CONV6]], 252
+; CHECK-NEXT: br label [[VECTOR_BODY_1:%.*]]
+; CHECK: vector.body.1:
+; CHECK-NEXT: [[INDEX_1:%.*]] = phi i64 [ 0, [[VECTOR_PH_1]] ], [ [[INDEX_NEXT_1:%.*]], [[VECTOR_BODY_1]] ]
+; CHECK-NEXT: [[TMP33:%.*]] = add nuw nsw i64 [[INDEX_1]], 15
+; CHECK-NEXT: [[TMP34:%.*]] = add i64 [[INDEX_1]], 16
+; CHECK-NEXT: [[TMP35:%.*]] = insertelement <2 x i64> poison, i64 [[TMP33]], i64 0
+; CHECK-NEXT: [[TMP36:%.*]] = insertelement <2 x i64> [[TMP35]], i64 [[TMP34]], i64 1
+; CHECK-NEXT: [[TMP37:%.*]] = add i64 [[INDEX_1]], 17
+; CHECK-NEXT: [[TMP38:%.*]] = add i64 [[INDEX_1]], 18
+; CHECK-NEXT: [[TMP39:%.*]] = insertelement <2 x i64> poison, i64 [[TMP37]], i64 0
+; CHECK-NEXT: [[TMP40:%.*]] = insertelement <2 x i64> [[TMP39]], i64 [[TMP38]], i64 1
+; CHECK-NEXT: [[TMP41:%.*]] = icmp ult <2 x i64> [[TMP36]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP42:%.*]] = icmp ult <2 x i64> [[TMP40]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP43:%.*]] = extractelement <2 x i1> [[TMP41]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP43]])
+; CHECK-NEXT: [[TMP44:%.*]] = extractelement <2 x i1> [[TMP41]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP44]])
+; CHECK-NEXT: [[TMP45:%.*]] = extractelement <2 x i1> [[TMP42]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP45]])
+; CHECK-NEXT: [[TMP46:%.*]] = extractelement <2 x i1> [[TMP42]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP46]])
+; CHECK-NEXT: [[TMP47:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP33]]
+; CHECK-NEXT: [[TMP48:%.*]] = getelementptr inbounds i8, ptr [[TMP47]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD_1:%.*]] = load <2 x double>, ptr [[TMP47]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD21_1:%.*]] = load <2 x double>, ptr [[TMP48]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP49:%.*]] = load double, ptr [[TMP32]], align 8, !alias.scope [[META3]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT22_1:%.*]] = insertelement <2 x double> poison, double [[TMP49]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT23_1:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_1]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP50:%.*]] = fmul <2 x double> [[WIDE_LOAD_1]], [[BROADCAST_SPLAT23_1]]
+; CHECK-NEXT: [[TMP51:%.*]] = fmul <2 x double> [[WIDE_LOAD21_1]], [[BROADCAST_SPLAT23_1]]
+; CHECK-NEXT: [[TMP52:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP33]]
+; CHECK-NEXT: [[TMP53:%.*]] = getelementptr inbounds i8, ptr [[TMP52]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD24_1:%.*]] = load <2 x double>, ptr [[TMP52]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD25_1:%.*]] = load <2 x double>, ptr [[TMP53]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP54:%.*]] = fsub <2 x double> [[WIDE_LOAD24_1]], [[TMP50]]
+; CHECK-NEXT: [[TMP55:%.*]] = fsub <2 x double> [[WIDE_LOAD25_1]], [[TMP51]]
+; CHECK-NEXT: store <2 x double> [[TMP54]], ptr [[TMP52]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: store <2 x double> [[TMP55]], ptr [[TMP53]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[INDEX_NEXT_1]] = add nuw i64 [[INDEX_1]], 4
+; CHECK-NEXT: [[TMP56:%.*]] = icmp eq i64 [[INDEX_NEXT_1]], [[N_VEC_1]]
+; CHECK-NEXT: br i1 [[TMP56]], label [[MIDDLE_BLOCK_1:%.*]], label [[VECTOR_BODY_1]], !llvm.loop [[LOOP7]]
+; CHECK: middle.block.1:
+; CHECK-NEXT: [[CMP_N_1:%.*]] = icmp eq i64 [[N_VEC_1]], [[CONV6]]
+; CHECK-NEXT: br i1 [[CMP_N_1]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1:%.*]], label [[FOR_BODY4_US_PREHEADER_1]]
+; CHECK: for.body4.us.preheader.1:
+; CHECK-NEXT: [[INDVARS_IV_PH_1:%.*]] = phi i64 [ 0, [[VECTOR_MEMCHECK_1]] ], [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]] ], [ [[N_VEC_1]], [[MIDDLE_BLOCK_1]] ]
; CHECK-NEXT: br label [[FOR_BODY4_US_1:%.*]]
; CHECK: for.body4.us.1:
-; CHECK-NEXT: [[INDVARS_IV_1:%.*]] = phi i64 [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US]] ], [ [[INDVARS_IV_NEXT_1:%.*]], [[FOR_BODY4_US_1]] ]
-; CHECK-NEXT: [[TMP8:%.*]] = add nuw nsw i64 [[INDVARS_IV_1]], 15
-; CHECK-NEXT: [[TMP9:%.*]] = icmp ult i64 [[INDVARS_IV_1]], 210
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP9]])
-; CHECK-NEXT: [[TMP10:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP8]]
-; CHECK-NEXT: [[MATRIXEXT_US_1:%.*]] = load double, ptr [[TMP10]], align 8
-; CHECK-NEXT: [[MATRIXEXT8_US_1:%.*]] = load double, ptr [[TMP7]], align 8
+; CHECK-NEXT: [[INDVARS_IV_1:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_1:%.*]], [[FOR_BODY4_US_1]] ], [ [[INDVARS_IV_PH_1]], [[FOR_BODY4_US_PREHEADER_1]] ]
+; CHECK-NEXT: [[TMP57:%.*]] = add nuw nsw i64 [[INDVARS_IV_1]], 15
+; CHECK-NEXT: [[TMP58:%.*]] = icmp ult i64 [[INDVARS_IV_1]], 210
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP58]])
+; CHECK-NEXT: [[TMP59:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP57]]
+; CHECK-NEXT: [[MATRIXEXT_US_1:%.*]] = load double, ptr [[TMP59]], align 8
+; CHECK-NEXT: [[MATRIXEXT8_US_1:%.*]] = load double, ptr [[TMP32]], align 8
; CHECK-NEXT: [[MUL_US_1:%.*]] = fmul double [[MATRIXEXT_US_1]], [[MATRIXEXT8_US_1]]
-; CHECK-NEXT: [[TMP11:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP8]]
-; CHECK-NEXT: [[MATRIXEXT11_US_1:%.*]] = load double, ptr [[TMP11]], align 8
+; CHECK-NEXT: [[TMP60:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP57]]
+; CHECK-NEXT: [[MATRIXEXT11_US_1:%.*]] = load double, ptr [[TMP60]], align 8
; CHECK-NEXT: [[SUB_US_1:%.*]] = fsub double [[MATRIXEXT11_US_1]], [[MUL_US_1]]
-; CHECK-NEXT: store double [[SUB_US_1]], ptr [[TMP11]], align 8
+; CHECK-NEXT: store double [[SUB_US_1]], ptr [[TMP60]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT_1]] = add nuw nsw i64 [[INDVARS_IV_1]], 1
; CHECK-NEXT: [[EXITCOND_NOT_1:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_1]], [[CONV6]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT_1]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1:%.*]], label [[FOR_BODY4_US_1]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT_1]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1]], label [[FOR_BODY4_US_1]], !llvm.loop [[LOOP10]]
; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us.1:
-; CHECK-NEXT: [[TMP12:%.*]] = add nuw nsw i64 [[CONV6]], 30
-; CHECK-NEXT: [[TMP13:%.*]] = icmp ult i32 [[I]], 195
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP13]])
-; CHECK-NEXT: [[TMP14:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP12]]
+; CHECK-NEXT: [[TMP61:%.*]] = add nuw nsw i64 [[CONV6]], 30
+; CHECK-NEXT: [[TMP62:%.*]] = icmp ult i32 [[I]], 195
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP62]])
+; CHECK-NEXT: [[TMP63:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP61]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK_2:%.*]] = icmp ult i32 [[I]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_2]], label [[FOR_BODY4_US_PREHEADER_2:%.*]], label [[VECTOR_MEMCHECK_2:%.*]]
+; CHECK: vector.memcheck.2:
+; CHECK-NEXT: [[BOUND0_2:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
+; CHECK-NEXT: [[BOUND1_2:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[FOUND_CONFLICT_2:%.*]] = and i1 [[BOUND0_2]], [[BOUND1_2]]
+; CHECK-NEXT: br i1 [[FOUND_CONFLICT_2]], label [[FOR_BODY4_US_PREHEADER_2]], label [[VECTOR_PH_2:%.*]]
+; CHECK: vector.ph.2:
+; CHECK-NEXT: [[N_VEC_2:%.*]] = and i64 [[CONV6]], 252
+; CHECK-NEXT: br label [[VECTOR_BODY_2:%.*]]
+; CHECK: vector.body.2:
+; CHECK-NEXT: [[INDEX_2:%.*]] = phi i64 [ 0, [[VECTOR_PH_2]] ], [ [[INDEX_NEXT_2:%.*]], [[VECTOR_BODY_2]] ]
+; CHECK-NEXT: [[TMP64:%.*]] = add nuw nsw i64 [[INDEX_2]], 30
+; CHECK-NEXT: [[TMP65:%.*]] = add i64 [[INDEX_2]], 31
+; CHECK-NEXT: [[TMP66:%.*]] = insertelement <2 x i64> poison, i64 [[TMP64]], i64 0
+; CHECK-NEXT: [[TMP67:%.*]] = insertelement <2 x i64> [[TMP66]], i64 [[TMP65]], i64 1
+; CHECK-NEXT: [[TMP68:%.*]] = add i64 [[INDEX_2]], 32
+; CHECK-NEXT: [[TMP69:%.*]] = add i64 [[INDEX_2]], 33
+; CHECK-NEXT: [[TMP70:%.*]] = insertelement <2 x i64> poison, i64 [[TMP68]], i64 0
+; CHECK-NEXT: [[TMP71:%.*]] = insertelement <2 x i64> [[TMP70]], i64 [[TMP69]], i64 1
+; CHECK-NEXT: [[TMP72:%.*]] = icmp ult <2 x i64> [[TMP67]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP73:%.*]] = icmp ult <2 x i64> [[TMP71]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP74:%.*]] = extractelement <2 x i1> [[TMP72]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP74]])
+; CHECK-NEXT: [[TMP75:%.*]] = extractelement <2 x i1> [[TMP72]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP75]])
+; CHECK-NEXT: [[TMP76:%.*]] = extractelement <2 x i1> [[TMP73]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP76]])
+; CHECK-NEXT: [[TMP77:%.*]] = extractelement <2 x i1> [[TMP73]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP77]])
+; CHECK-NEXT: [[TMP78:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP64]]
+; CHECK-NEXT: [[TMP79:%.*]] = getelementptr inbounds i8, ptr [[TMP78]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD_2:%.*]] = load <2 x double>, ptr [[TMP78]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD21_2:%.*]] = load <2 x double>, ptr [[TMP79]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP80:%.*]] = load double, ptr [[TMP63]], align 8, !alias.scope [[META3]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT22_2:%.*]] = insertelement <2 x double> poison, double [[TMP80]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT23_2:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_2]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP81:%.*]] = fmul <2 x double> [[WIDE_LOAD_2]], [[BROADCAST_SPLAT23_2]]
+; CHECK-NEXT: [[TMP82:%.*]] = fmul <2 x double> [[WIDE_LOAD21_2]], [[BROADCAST_SPLAT23_2]]
+; CHECK-NEXT: [[TMP83:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP64]]
+; CHECK-NEXT: [[TMP84:%.*]] = getelementptr inbounds i8, ptr [[TMP83]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD24_2:%.*]] = load <2 x double>, ptr [[TMP83]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD25_2:%.*]] = load <2 x double>, ptr [[TMP84]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP85:%.*]] = fsub <2 x double> [[WIDE_LOAD24_2]], [[TMP81]]
+; CHECK-NEXT: [[TMP86:%.*]] = fsub <2 x double> [[WIDE_LOAD25_2]], [[TMP82]]
+; CHECK-NEXT: store <2 x double> [[TMP85]], ptr [[TMP83]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: store <2 x double> [[TMP86]], ptr [[TMP84]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[INDEX_NEXT_2]] = add nuw i64 [[INDEX_2]], 4
+; CHECK-NEXT: [[TMP87:%.*]] = icmp eq i64 [[INDEX_NEXT_2]], [[N_VEC_2]]
+; CHECK-NEXT: br i1 [[TMP87]], label [[MIDDLE_BLOCK_2:%.*]], label [[VECTOR_BODY_2]], !llvm.loop [[LOOP7]]
+; CHECK: middle.block.2:
+; CHECK-NEXT: [[CMP_N_2:%.*]] = icmp eq i64 [[N_VEC_2]], [[CONV6]]
+; CHECK-NEXT: br i1 [[CMP_N_2]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2:%.*]], label [[FOR_BODY4_US_PREHEADER_2]]
+; CHECK: for.body4.us.preheader.2:
+; CHECK-NEXT: [[INDVARS_IV_PH_2:%.*]] = phi i64 [ 0, [[VECTOR_MEMCHECK_2]] ], [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1]] ], [ [[N_VEC_2]], [[MIDDLE_BLOCK_2]] ]
; CHECK-NEXT: br label [[FOR_BODY4_US_2:%.*]]
; CHECK: for.body4.us.2:
-; CHECK-NEXT: [[INDVARS_IV_2:%.*]] = phi i64 [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_1]] ], [ [[INDVARS_IV_NEXT_2:%.*]], [[FOR_BODY4_US_2]] ]
-; CHECK-NEXT: [[TMP15:%.*]] = add nuw nsw i64 [[INDVARS_IV_2]], 30
-; CHECK-NEXT: [[TMP16:%.*]] = icmp ult i64 [[INDVARS_IV_2]], 195
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP16]])
-; CHECK-NEXT: [[TMP17:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP15]]
-; CHECK-NEXT: [[MATRIXEXT_US_2:%.*]] = load double, ptr [[TMP17]], align 8
-; CHECK-NEXT: [[MATRIXEXT8_US_2:%.*]] = load double, ptr [[TMP14]], align 8
+; CHECK-NEXT: [[INDVARS_IV_2:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_2:%.*]], [[FOR_BODY4_US_2]] ], [ [[INDVARS_IV_PH_2]], [[FOR_BODY4_US_PREHEADER_2]] ]
+; CHECK-NEXT: [[TMP88:%.*]] = add nuw nsw i64 [[INDVARS_IV_2]], 30
+; CHECK-NEXT: [[TMP89:%.*]] = icmp ult i64 [[INDVARS_IV_2]], 195
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP89]])
+; CHECK-NEXT: [[TMP90:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP88]]
+; CHECK-NEXT: [[MATRIXEXT_US_2:%.*]] = load double, ptr [[TMP90]], align 8
+; CHECK-NEXT: [[MATRIXEXT8_US_2:%.*]] = load double, ptr [[TMP63]], align 8
; CHECK-NEXT: [[MUL_US_2:%.*]] = fmul double [[MATRIXEXT_US_2]], [[MATRIXEXT8_US_2]]
-; CHECK-NEXT: [[TMP18:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP15]]
-; CHECK-NEXT: [[MATRIXEXT11_US_2:%.*]] = load double, ptr [[TMP18]], align 8
+; CHECK-NEXT: [[TMP91:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP88]]
+; CHECK-NEXT: [[MATRIXEXT11_US_2:%.*]] = load double, ptr [[TMP91]], align 8
; CHECK-NEXT: [[SUB_US_2:%.*]] = fsub double [[MATRIXEXT11_US_2]], [[MUL_US_2]]
-; CHECK-NEXT: store double [[SUB_US_2]], ptr [[TMP18]], align 8
+; CHECK-NEXT: store double [[SUB_US_2]], ptr [[TMP91]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT_2]] = add nuw nsw i64 [[INDVARS_IV_2]], 1
; CHECK-NEXT: [[EXITCOND_NOT_2:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_2]], [[CONV6]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT_2]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2:%.*]], label [[FOR_BODY4_US_2]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT_2]], label [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2]], label [[FOR_BODY4_US_2]], !llvm.loop [[LOOP10]]
; CHECK: for.cond1.for.cond.cleanup3_crit_edge.us.2:
-; CHECK-NEXT: [[TMP19:%.*]] = add nuw nsw i64 [[CONV6]], 45
-; CHECK-NEXT: [[TMP20:%.*]] = icmp ult i32 [[I]], 180
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP20]])
-; CHECK-NEXT: [[TMP21:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP19]]
+; CHECK-NEXT: [[TMP92:%.*]] = add nuw nsw i64 [[CONV6]], 45
+; CHECK-NEXT: [[TMP93:%.*]] = icmp ult i32 [[I]], 180
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP93]])
+; CHECK-NEXT: [[TMP94:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP92]]
+; CHECK-NEXT: [[MIN_ITERS_CHECK_3:%.*]] = icmp ult i32 [[I]], 4
+; CHECK-NEXT: br i1 [[MIN_ITERS_CHECK_3]], label [[FOR_BODY4_US_PREHEADER_3:%.*]], label [[VECTOR_MEMCHECK_3:%.*]]
+; CHECK: vector.memcheck.3:
+; CHECK-NEXT: [[BOUND0_3:%.*]] = icmp ugt ptr [[SCEVGEP20]], [[B]]
+; CHECK-NEXT: [[BOUND1_3:%.*]] = icmp ugt ptr [[SCEVGEP]], [[A]]
+; CHECK-NEXT: [[FOUND_CONFLICT_3:%.*]] = and i1 [[BOUND0_3]], [[BOUND1_3]]
+; CHECK-NEXT: br i1 [[FOUND_CONFLICT_3]], label [[FOR_BODY4_US_PREHEADER_3]], label [[VECTOR_PH_3:%.*]]
+; CHECK: vector.ph.3:
+; CHECK-NEXT: [[N_VEC_3:%.*]] = and i64 [[CONV6]], 252
+; CHECK-NEXT: br label [[VECTOR_BODY_3:%.*]]
+; CHECK: vector.body.3:
+; CHECK-NEXT: [[INDEX_3:%.*]] = phi i64 [ 0, [[VECTOR_PH_3]] ], [ [[INDEX_NEXT_3:%.*]], [[VECTOR_BODY_3]] ]
+; CHECK-NEXT: [[TMP95:%.*]] = add nuw nsw i64 [[INDEX_3]], 45
+; CHECK-NEXT: [[TMP96:%.*]] = add i64 [[INDEX_3]], 46
+; CHECK-NEXT: [[TMP97:%.*]] = insertelement <2 x i64> poison, i64 [[TMP95]], i64 0
+; CHECK-NEXT: [[TMP98:%.*]] = insertelement <2 x i64> [[TMP97]], i64 [[TMP96]], i64 1
+; CHECK-NEXT: [[TMP99:%.*]] = add i64 [[INDEX_3]], 47
+; CHECK-NEXT: [[TMP100:%.*]] = add i64 [[INDEX_3]], 48
+; CHECK-NEXT: [[TMP101:%.*]] = insertelement <2 x i64> poison, i64 [[TMP99]], i64 0
+; CHECK-NEXT: [[TMP102:%.*]] = insertelement <2 x i64> [[TMP101]], i64 [[TMP100]], i64 1
+; CHECK-NEXT: [[TMP103:%.*]] = icmp ult <2 x i64> [[TMP98]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP104:%.*]] = icmp ult <2 x i64> [[TMP102]], <i64 225, i64 225>
+; CHECK-NEXT: [[TMP105:%.*]] = extractelement <2 x i1> [[TMP103]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP105]])
+; CHECK-NEXT: [[TMP106:%.*]] = extractelement <2 x i1> [[TMP103]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP106]])
+; CHECK-NEXT: [[TMP107:%.*]] = extractelement <2 x i1> [[TMP104]], i64 0
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP107]])
+; CHECK-NEXT: [[TMP108:%.*]] = extractelement <2 x i1> [[TMP104]], i64 1
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP108]])
+; CHECK-NEXT: [[TMP109:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP95]]
+; CHECK-NEXT: [[TMP110:%.*]] = getelementptr inbounds i8, ptr [[TMP109]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD_3:%.*]] = load <2 x double>, ptr [[TMP109]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD21_3:%.*]] = load <2 x double>, ptr [[TMP110]], align 8, !alias.scope [[META0]]
+; CHECK-NEXT: [[TMP111:%.*]] = load double, ptr [[TMP94]], align 8, !alias.scope [[META3]]
+; CHECK-NEXT: [[BROADCAST_SPLATINSERT22_3:%.*]] = insertelement <2 x double> poison, double [[TMP111]], i64 0
+; CHECK-NEXT: [[BROADCAST_SPLAT23_3:%.*]] = shufflevector <2 x double> [[BROADCAST_SPLATINSERT22_3]], <2 x double> poison, <2 x i32> zeroinitializer
+; CHECK-NEXT: [[TMP112:%.*]] = fmul <2 x double> [[WIDE_LOAD_3]], [[BROADCAST_SPLAT23_3]]
+; CHECK-NEXT: [[TMP113:%.*]] = fmul <2 x double> [[WIDE_LOAD21_3]], [[BROADCAST_SPLAT23_3]]
+; CHECK-NEXT: [[TMP114:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP95]]
+; CHECK-NEXT: [[TMP115:%.*]] = getelementptr inbounds i8, ptr [[TMP114]], i64 16
+; CHECK-NEXT: [[WIDE_LOAD24_3:%.*]] = load <2 x double>, ptr [[TMP114]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[WIDE_LOAD25_3:%.*]] = load <2 x double>, ptr [[TMP115]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[TMP116:%.*]] = fsub <2 x double> [[WIDE_LOAD24_3]], [[TMP112]]
+; CHECK-NEXT: [[TMP117:%.*]] = fsub <2 x double> [[WIDE_LOAD25_3]], [[TMP113]]
+; CHECK-NEXT: store <2 x double> [[TMP116]], ptr [[TMP114]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: store <2 x double> [[TMP117]], ptr [[TMP115]], align 8, !alias.scope [[META5]], !noalias [[META0]]
+; CHECK-NEXT: [[INDEX_NEXT_3]] = add nuw i64 [[INDEX_3]], 4
+; CHECK-NEXT: [[TMP118:%.*]] = icmp eq i64 [[INDEX_NEXT_3]], [[N_VEC_3]]
+; CHECK-NEXT: br i1 [[TMP118]], label [[MIDDLE_BLOCK_3:%.*]], label [[VECTOR_BODY_3]], !llvm.loop [[LOOP7]]
+; CHECK: middle.block.3:
+; CHECK-NEXT: [[CMP_N_3:%.*]] = icmp eq i64 [[N_VEC_3]], [[CONV6]]
+; CHECK-NEXT: br i1 [[CMP_N_3]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY4_US_PREHEADER_3]]
+; CHECK: for.body4.us.preheader.3:
+; CHECK-NEXT: [[INDVARS_IV_PH_3:%.*]] = phi i64 [ 0, [[VECTOR_MEMCHECK_3]] ], [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2]] ], [ [[N_VEC_3]], [[MIDDLE_BLOCK_3]] ]
; CHECK-NEXT: br label [[FOR_BODY4_US_3:%.*]]
; CHECK: for.body4.us.3:
-; CHECK-NEXT: [[INDVARS_IV_3:%.*]] = phi i64 [ 0, [[FOR_COND1_FOR_COND_CLEANUP3_CRIT_EDGE_US_2]] ], [ [[INDVARS_IV_NEXT_3:%.*]], [[FOR_BODY4_US_3]] ]
-; CHECK-NEXT: [[TMP22:%.*]] = add nuw nsw i64 [[INDVARS_IV_3]], 45
-; CHECK-NEXT: [[TMP23:%.*]] = icmp ult i64 [[INDVARS_IV_3]], 180
-; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP23]])
-; CHECK-NEXT: [[TMP24:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP22]]
-; CHECK-NEXT: [[MATRIXEXT_US_3:%.*]] = load double, ptr [[TMP24]], align 8
-; CHECK-NEXT: [[MATRIXEXT8_US_3:%.*]] = load double, ptr [[TMP21]], align 8
+; CHECK-NEXT: [[INDVARS_IV_3:%.*]] = phi i64 [ [[INDVARS_IV_NEXT_3:%.*]], [[FOR_BODY4_US_3]] ], [ [[INDVARS_IV_PH_3]], [[FOR_BODY4_US_PREHEADER_3]] ]
+; CHECK-NEXT: [[TMP119:%.*]] = add nuw nsw i64 [[INDVARS_IV_3]], 45
+; CHECK-NEXT: [[TMP120:%.*]] = icmp ult i64 [[INDVARS_IV_3]], 180
+; CHECK-NEXT: tail call void @llvm.assume(i1 [[TMP120]])
+; CHECK-NEXT: [[TMP121:%.*]] = getelementptr inbounds <225 x double>, ptr [[A]], i64 0, i64 [[TMP119]]
+; CHECK-NEXT: [[MATRIXEXT_US_3:%.*]] = load double, ptr [[TMP121]], align 8
+; CHECK-NEXT: [[MATRIXEXT8_US_3:%.*]] = load double, ptr [[TMP94]], align 8
; CHECK-NEXT: [[MUL_US_3:%.*]] = fmul double [[MATRIXEXT_US_3]], [[MATRIXEXT8_US_3]]
-; CHECK-NEXT: [[TMP25:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP22]]
-; CHECK-NEXT: [[MATRIXEXT11_US_3:%.*]] = load double, ptr [[TMP25]], align 8
+; CHECK-NEXT: [[TMP122:%.*]] = getelementptr inbounds <225 x double>, ptr [[B]], i64 0, i64 [[TMP119]]
+; CHECK-NEXT: [[MATRIXEXT11_US_3:%.*]] = load double, ptr [[TMP122]], align 8
; CHECK-NEXT: [[SUB_US_3:%.*]] = fsub double [[MATRIXEXT11_US_3]], [[MUL_US_3]]
-; CHECK-NEXT: store double [[SUB_US_3]], ptr [[TMP25]], align 8
+; CHECK-NEXT: store double [[SUB_US_3]], ptr [[TMP122]], align 8
; CHECK-NEXT: [[INDVARS_IV_NEXT_3]] = add nuw nsw i64 [[INDVARS_IV_3]], 1
; CHECK-NEXT: [[EXITCOND_NOT_3:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_3]], [[CONV6]]
-; CHECK-NEXT: br i1 [[EXITCOND_NOT_3]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY4_US_3]]
+; CHECK-NEXT: br i1 [[EXITCOND_NOT_3]], label [[FOR_COND_CLEANUP]], label [[FOR_BODY4_US_3]], !llvm.loop [[LOOP10]]
; CHECK: for.cond.cleanup:
; CHECK-NEXT: ret void
;
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll
index 290560151b79..3749bdf1bba3 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat-inseltpoison.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX7 %s
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX8 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX9 %s
define <2 x i16> @uadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX7-LABEL: @uadd_sat_v2i16(
@@ -21,6 +21,11 @@ define <2 x i16> @uadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @uadd_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -51,6 +56,11 @@ define <2 x i16> @usub_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @usub_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -81,6 +91,11 @@ define <2 x i16> @sadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @sadd_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -111,6 +126,11 @@ define <2 x i16> @ssub_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @ssub_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -252,6 +272,18 @@ define <3 x i16> @uadd_sat_v3i16(<3 x i16> %arg0, <3 x i16> %arg1) {
; GFX8-NEXT: [[INS_2:%.*]] = insertelement <3 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
; GFX8-NEXT: ret <3 x i16> [[INS_2]]
;
+; GFX9-LABEL: @uadd_sat_v3i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[ARG0_2:%.*]] = extractelement <3 x i16> [[ARG0:%.*]], i64 2
+; GFX9-NEXT: [[ARG1_2:%.*]] = extractelement <3 x i16> [[ARG1:%.*]], i64 2
+; GFX9-NEXT: [[TMP0:%.*]] = shufflevector <3 x i16> [[ARG0]], <3 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP1:%.*]] = shufflevector <3 x i16> [[ARG1]], <3 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
+; GFX9-NEXT: [[ADD_2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_2]], i16 [[ARG1_2]])
+; GFX9-NEXT: [[TMP3:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> poison, <3 x i32> <i32 0, i32 1, i32 poison>
+; GFX9-NEXT: [[INS_2:%.*]] = insertelement <3 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
+; GFX9-NEXT: ret <3 x i16> [[INS_2]]
+;
bb:
%arg0.0 = extractelement <3 x i16> %arg0, i64 0
%arg0.1 = extractelement <3 x i16> %arg0, i64 1
@@ -291,19 +323,25 @@ define <4 x i16> @uadd_sat_v4i16(<4 x i16> %arg0, <4 x i16> %arg1) {
;
; GFX8-LABEL: @uadd_sat_v4i16(
; GFX8-NEXT: bb:
-; GFX8-NEXT: [[ARG0_2:%.*]] = extractelement <4 x i16> [[ARG0:%.*]], i64 2
-; GFX8-NEXT: [[ARG0_3:%.*]] = extractelement <4 x i16> [[ARG0]], i64 3
-; GFX8-NEXT: [[ARG1_2:%.*]] = extractelement <4 x i16> [[ARG1:%.*]], i64 2
-; GFX8-NEXT: [[ARG1_3:%.*]] = extractelement <4 x i16> [[ARG1]], i64 3
-; GFX8-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
-; GFX8-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX8-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX8-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
; GFX8-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
-; GFX8-NEXT: [[ADD_2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_2]], i16 [[ARG1_2]])
-; GFX8-NEXT: [[ADD_3:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_3]], i16 [[ARG1_3]])
-; GFX8-NEXT: [[TMP3:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; GFX8-NEXT: [[INS_2:%.*]] = insertelement <4 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
-; GFX8-NEXT: [[INS_3:%.*]] = insertelement <4 x i16> [[INS_2]], i16 [[ADD_3]], i64 3
-; GFX8-NEXT: ret <4 x i16> [[INS_3]]
+; GFX8-NEXT: [[TMP3:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX8-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX8-NEXT: [[TMP5:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP3]], <2 x i16> [[TMP4]])
+; GFX8-NEXT: [[INS_31:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX8-NEXT: ret <4 x i16> [[INS_31]]
+;
+; GFX9-LABEL: @uadd_sat_v4i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
+; GFX9-NEXT: [[TMP3:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX9-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX9-NEXT: [[TMP5:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP3]], <2 x i16> [[TMP4]])
+; GFX9-NEXT: [[INS_31:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX9-NEXT: ret <4 x i16> [[INS_31]]
;
bb:
%arg0.0 = extractelement <4 x i16> %arg0, i64 0
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll
index 2038400a0586..0bb641371825 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/add_sub_sat.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX7 %s
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX8 %s
-; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX8 %s
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -passes=slp-vectorizer,instcombine %s | FileCheck -check-prefixes=GCN,GFX9 %s
define <2 x i16> @uadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX7-LABEL: @uadd_sat_v2i16(
@@ -21,6 +21,11 @@ define <2 x i16> @uadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @uadd_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -51,6 +56,11 @@ define <2 x i16> @usub_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @usub_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.usub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -81,6 +91,11 @@ define <2 x i16> @sadd_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @sadd_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.sadd.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -111,6 +126,11 @@ define <2 x i16> @ssub_sat_v2i16(<2 x i16> %arg0, <2 x i16> %arg1) {
; GFX8-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
; GFX8-NEXT: ret <2 x i16> [[TMP0]]
;
+; GFX9-LABEL: @ssub_sat_v2i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = call <2 x i16> @llvm.ssub.sat.v2i16(<2 x i16> [[ARG0:%.*]], <2 x i16> [[ARG1:%.*]])
+; GFX9-NEXT: ret <2 x i16> [[TMP0]]
+;
bb:
%arg0.0 = extractelement <2 x i16> %arg0, i64 0
%arg0.1 = extractelement <2 x i16> %arg0, i64 1
@@ -252,6 +272,18 @@ define <3 x i16> @uadd_sat_v3i16(<3 x i16> %arg0, <3 x i16> %arg1) {
; GFX8-NEXT: [[INS_2:%.*]] = insertelement <3 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
; GFX8-NEXT: ret <3 x i16> [[INS_2]]
;
+; GFX9-LABEL: @uadd_sat_v3i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[ARG0_2:%.*]] = extractelement <3 x i16> [[ARG0:%.*]], i64 2
+; GFX9-NEXT: [[ARG1_2:%.*]] = extractelement <3 x i16> [[ARG1:%.*]], i64 2
+; GFX9-NEXT: [[TMP0:%.*]] = shufflevector <3 x i16> [[ARG0]], <3 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP1:%.*]] = shufflevector <3 x i16> [[ARG1]], <3 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
+; GFX9-NEXT: [[ADD_2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_2]], i16 [[ARG1_2]])
+; GFX9-NEXT: [[TMP3:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> poison, <3 x i32> <i32 0, i32 1, i32 poison>
+; GFX9-NEXT: [[INS_2:%.*]] = insertelement <3 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
+; GFX9-NEXT: ret <3 x i16> [[INS_2]]
+;
bb:
%arg0.0 = extractelement <3 x i16> %arg0, i64 0
%arg0.1 = extractelement <3 x i16> %arg0, i64 1
@@ -291,19 +323,25 @@ define <4 x i16> @uadd_sat_v4i16(<4 x i16> %arg0, <4 x i16> %arg1) {
;
; GFX8-LABEL: @uadd_sat_v4i16(
; GFX8-NEXT: bb:
-; GFX8-NEXT: [[ARG0_2:%.*]] = extractelement <4 x i16> [[ARG0:%.*]], i64 2
-; GFX8-NEXT: [[ARG0_3:%.*]] = extractelement <4 x i16> [[ARG0]], i64 3
-; GFX8-NEXT: [[ARG1_2:%.*]] = extractelement <4 x i16> [[ARG1:%.*]], i64 2
-; GFX8-NEXT: [[ARG1_3:%.*]] = extractelement <4 x i16> [[ARG1]], i64 3
-; GFX8-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
-; GFX8-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX8-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX8-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
; GFX8-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
-; GFX8-NEXT: [[ADD_2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_2]], i16 [[ARG1_2]])
-; GFX8-NEXT: [[ADD_3:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_3]], i16 [[ARG1_3]])
-; GFX8-NEXT: [[TMP3:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; GFX8-NEXT: [[INS_2:%.*]] = insertelement <4 x i16> [[TMP3]], i16 [[ADD_2]], i64 2
-; GFX8-NEXT: [[INS_3:%.*]] = insertelement <4 x i16> [[INS_2]], i16 [[ADD_3]], i64 3
-; GFX8-NEXT: ret <4 x i16> [[INS_3]]
+; GFX8-NEXT: [[TMP3:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX8-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX8-NEXT: [[TMP5:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP3]], <2 x i16> [[TMP4]])
+; GFX8-NEXT: [[INS_31:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX8-NEXT: ret <4 x i16> [[INS_31]]
+;
+; GFX9-LABEL: @uadd_sat_v4i16(
+; GFX9-NEXT: bb:
+; GFX9-NEXT: [[TMP0:%.*]] = shufflevector <4 x i16> [[ARG0:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP1:%.*]] = shufflevector <4 x i16> [[ARG1:%.*]], <4 x i16> poison, <2 x i32> <i32 0, i32 1>
+; GFX9-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
+; GFX9-NEXT: [[TMP3:%.*]] = shufflevector <4 x i16> [[ARG0]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX9-NEXT: [[TMP4:%.*]] = shufflevector <4 x i16> [[ARG1]], <4 x i16> poison, <2 x i32> <i32 2, i32 3>
+; GFX9-NEXT: [[TMP5:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP3]], <2 x i16> [[TMP4]])
+; GFX9-NEXT: [[INS_31:%.*]] = shufflevector <2 x i16> [[TMP2]], <2 x i16> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; GFX9-NEXT: ret <4 x i16> [[INS_31]]
;
bb:
%arg0.0 = extractelement <4 x i16> %arg0, i64 0
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/crash_extract_subvector_cost.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/crash_extract_subvector_cost.ll
index 0a020c855cc2..e474bab2ad96 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/crash_extract_subvector_cost.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/crash_extract_subvector_cost.ll
@@ -4,15 +4,10 @@
define <2 x i16> @uadd_sat_v9i16_combine_vi16(<9 x i16> %arg0, <9 x i16> %arg1) {
; CHECK-LABEL: @uadd_sat_v9i16_combine_vi16(
; CHECK-NEXT: bb:
-; CHECK-NEXT: [[ARG0_1:%.*]] = extractelement <9 x i16> undef, i64 7
-; CHECK-NEXT: [[ARG0_2:%.*]] = extractelement <9 x i16> [[ARG0:%.*]], i64 8
-; CHECK-NEXT: [[ARG1_1:%.*]] = extractelement <9 x i16> [[ARG1:%.*]], i64 7
-; CHECK-NEXT: [[ARG1_2:%.*]] = extractelement <9 x i16> [[ARG1]], i64 8
-; CHECK-NEXT: [[ADD_1:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_1]], i16 [[ARG1_1]])
-; CHECK-NEXT: [[ADD_2:%.*]] = call i16 @llvm.uadd.sat.i16(i16 [[ARG0_2]], i16 [[ARG1_2]])
-; CHECK-NEXT: [[INS_1:%.*]] = insertelement <2 x i16> undef, i16 [[ADD_1]], i64 0
-; CHECK-NEXT: [[INS_2:%.*]] = insertelement <2 x i16> [[INS_1]], i16 [[ADD_2]], i64 1
-; CHECK-NEXT: ret <2 x i16> [[INS_2]]
+; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <9 x i16> [[ARG0:%.*]], <9 x i16> poison, <2 x i32> <i32 poison, i32 8>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <9 x i16> [[ARG1:%.*]], <9 x i16> poison, <2 x i32> <i32 7, i32 8>
+; CHECK-NEXT: [[TMP2:%.*]] = call <2 x i16> @llvm.uadd.sat.v2i16(<2 x i16> [[TMP0]], <2 x i16> [[TMP1]])
+; CHECK-NEXT: ret <2 x i16> [[TMP2]]
;
bb:
%arg0.1 = extractelement <9 x i16> undef, i64 7
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll
index 46980b33e401..3b63c1e35610 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/phi-result-use-order.ll
@@ -4,23 +4,20 @@
define <4 x half> @phis(i1 %cmp1, <4 x half> %in1, <4 x half> %in2) {
; CHECK-LABEL: @phis(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A2:%.*]] = extractelement <4 x half> [[IN1:%.*]], i64 2
-; CHECK-NEXT: [[A3:%.*]] = extractelement <4 x half> [[IN1]], i64 3
-; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x half> [[IN1]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x half> [[IN1:%.*]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x half> [[IN1]], <4 x half> poison, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB1:%.*]], label [[BB0:%.*]]
; CHECK: bb0:
-; CHECK-NEXT: [[B2:%.*]] = extractelement <4 x half> [[IN2:%.*]], i64 2
-; CHECK-NEXT: [[B3:%.*]] = extractelement <4 x half> [[IN2]], i64 3
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x half> [[IN2]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x half> [[IN2:%.*]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x half> [[IN2]], <4 x half> poison, <2 x i32> <i32 2, i32 3>
; CHECK-NEXT: br label [[BB1]]
; CHECK: bb1:
-; CHECK-NEXT: [[C2:%.*]] = phi half [ [[A2]], [[ENTRY:%.*]] ], [ [[B2]], [[BB0]] ]
-; CHECK-NEXT: [[C3:%.*]] = phi half [ [[A3]], [[ENTRY]] ], [ [[B3]], [[BB0]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = phi <2 x half> [ [[TMP0]], [[ENTRY]] ], [ [[TMP1]], [[BB0]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x half> [[TMP2]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; CHECK-NEXT: [[O2:%.*]] = insertelement <4 x half> [[TMP3]], half [[C2]], i64 2
-; CHECK-NEXT: [[O3:%.*]] = insertelement <4 x half> [[O2]], half [[C3]], i64 3
-; CHECK-NEXT: ret <4 x half> [[O3]]
+; CHECK-NEXT: [[TMP4:%.*]] = phi <2 x half> [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP2]], [[BB0]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = phi <2 x half> [ [[TMP1]], [[ENTRY]] ], [ [[TMP3]], [[BB0]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x half> [[TMP4]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x half> [[TMP5]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <2 x half> [[TMP4]], <2 x half> [[TMP5]], <4 x i32> <i32 0, i32 1, i32 2, i32 3>
+; CHECK-NEXT: ret <4 x half> [[TMP8]]
;
entry:
%a0 = extractelement <4 x half> %in1, i64 0
@@ -52,23 +49,20 @@ bb1:
define <4 x half> @phis_reverse(i1 %cmp1, <4 x half> %in1, <4 x half> %in2) {
; CHECK-LABEL: @phis_reverse(
; CHECK-NEXT: entry:
-; CHECK-NEXT: [[A2:%.*]] = extractelement <4 x half> [[IN1:%.*]], i64 2
-; CHECK-NEXT: [[A3:%.*]] = extractelement <4 x half> [[IN1]], i64 3
-; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x half> [[IN1]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP0:%.*]] = shufflevector <4 x half> [[IN1:%.*]], <4 x half> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x half> [[IN1]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
; CHECK-NEXT: br i1 [[CMP1:%.*]], label [[BB1:%.*]], label [[BB0:%.*]]
; CHECK: bb0:
-; CHECK-NEXT: [[B2:%.*]] = extractelement <4 x half> [[IN2:%.*]], i64 2
-; CHECK-NEXT: [[B3:%.*]] = extractelement <4 x half> [[IN2]], i64 3
-; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <4 x half> [[IN2]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
+; CHECK-NEXT: [[TMP2:%.*]] = shufflevector <4 x half> [[IN2:%.*]], <4 x half> poison, <2 x i32> <i32 2, i32 3>
+; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <4 x half> [[IN2]], <4 x half> poison, <2 x i32> <i32 0, i32 1>
; CHECK-NEXT: br label [[BB1]]
; CHECK: bb1:
-; CHECK-NEXT: [[C3:%.*]] = phi half [ [[A3]], [[ENTRY:%.*]] ], [ [[B3]], [[BB0]] ]
-; CHECK-NEXT: [[C2:%.*]] = phi half [ [[A2]], [[ENTRY]] ], [ [[B2]], [[BB0]] ]
-; CHECK-NEXT: [[TMP2:%.*]] = phi <2 x half> [ [[TMP0]], [[ENTRY]] ], [ [[TMP1]], [[BB0]] ]
-; CHECK-NEXT: [[TMP3:%.*]] = shufflevector <2 x half> [[TMP2]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
-; CHECK-NEXT: [[O2:%.*]] = insertelement <4 x half> [[TMP3]], half [[C2]], i64 2
-; CHECK-NEXT: [[O3:%.*]] = insertelement <4 x half> [[O2]], half [[C3]], i64 3
-; CHECK-NEXT: ret <4 x half> [[O3]]
+; CHECK-NEXT: [[TMP4:%.*]] = phi <2 x half> [ [[TMP0]], [[ENTRY:%.*]] ], [ [[TMP2]], [[BB0]] ]
+; CHECK-NEXT: [[TMP5:%.*]] = phi <2 x half> [ [[TMP1]], [[ENTRY]] ], [ [[TMP3]], [[BB0]] ]
+; CHECK-NEXT: [[TMP6:%.*]] = shufflevector <2 x half> [[TMP5]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP7:%.*]] = shufflevector <2 x half> [[TMP4]], <2 x half> poison, <4 x i32> <i32 0, i32 1, i32 poison, i32 poison>
+; CHECK-NEXT: [[TMP8:%.*]] = shufflevector <4 x half> [[TMP6]], <4 x half> [[TMP7]], <4 x i32> <i32 0, i32 1, i32 4, i32 5>
+; CHECK-NEXT: ret <4 x half> [[TMP8]]
;
entry:
%a0 = extractelement <4 x half> %in1, i64 0
diff --git a/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll b/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll
index b34b9a352536..dfa8be974177 100644
--- a/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll
+++ b/llvm/test/Transforms/SLPVectorizer/AMDGPU/reduction.ll
@@ -3,21 +3,10 @@
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -passes=slp-vectorizer,dce < %s | FileCheck -check-prefixes=GCN,VI %s
define half @reduction_half4(<4 x half> %a) {
-; GFX9-LABEL: @reduction_half4(
-; GFX9-NEXT: entry:
-; GFX9-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH8000, <4 x half> [[A:%.*]])
-; GFX9-NEXT: ret half [[TMP0]]
-;
-; VI-LABEL: @reduction_half4(
-; VI-NEXT: entry:
-; VI-NEXT: [[ELT0:%.*]] = extractelement <4 x half> [[A:%.*]], i64 0
-; VI-NEXT: [[ELT1:%.*]] = extractelement <4 x half> [[A]], i64 1
-; VI-NEXT: [[ELT2:%.*]] = extractelement <4 x half> [[A]], i64 2
-; VI-NEXT: [[ELT3:%.*]] = extractelement <4 x half> [[A]], i64 3
-; VI-NEXT: [[ADD1:%.*]] = fadd fast half [[ELT1]], [[ELT0]]
-; VI-NEXT: [[ADD2:%.*]] = fadd fast half [[ELT2]], [[ADD1]]
-; VI-NEXT: [[ADD3:%.*]] = fadd fast half [[ELT3]], [[ADD2]]
-; VI-NEXT: ret half [[ADD3]]
+; GCN-LABEL: @reduction_half4(
+; GCN-NEXT: entry:
+; GCN-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v4f16(half 0xH8000, <4 x half> [[A:%.*]])
+; GCN-NEXT: ret half [[TMP0]]
;
entry:
%elt0 = extractelement <4 x half> %a, i64 0
@@ -33,29 +22,10 @@ entry:
}
define half @reduction_half8(<8 x half> %vec8) {
-; GFX9-LABEL: @reduction_half8(
-; GFX9-NEXT: entry:
-; GFX9-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH8000, <8 x half> [[VEC8:%.*]])
-; GFX9-NEXT: ret half [[TMP0]]
-;
-; VI-LABEL: @reduction_half8(
-; VI-NEXT: entry:
-; VI-NEXT: [[ELT0:%.*]] = extractelement <8 x half> [[VEC8:%.*]], i64 0
-; VI-NEXT: [[ELT1:%.*]] = extractelement <8 x half> [[VEC8]], i64 1
-; VI-NEXT: [[ELT2:%.*]] = extractelement <8 x half> [[VEC8]], i64 2
-; VI-NEXT: [[ELT3:%.*]] = extractelement <8 x half> [[VEC8]], i64 3
-; VI-NEXT: [[ELT4:%.*]] = extractelement <8 x half> [[VEC8]], i64 4
-; VI-NEXT: [[ELT5:%.*]] = extractelement <8 x half> [[VEC8]], i64 5
-; VI-NEXT: [[ELT6:%.*]] = extractelement <8 x half> [[VEC8]], i64 6
-; VI-NEXT: [[ELT7:%.*]] = extractelement <8 x half> [[VEC8]], i64 7
-; VI-NEXT: [[ADD1:%.*]] = fadd fast half [[ELT1]], [[ELT0]]
-; VI-NEXT: [[ADD2:%.*]] = fadd fast half [[ELT2]], [[ADD1]]
-; VI-NEXT: [[ADD3:%.*]] = fadd fast half [[ELT3]], [[ADD2]]
-; VI-NEXT: [[ADD4:%.*]] = fadd fast half [[ELT4]], [[ADD3]]
-; VI-NEXT: [[ADD5:%.*]] = fadd fast half [[ELT5]], [[ADD4]]
-; VI-NEXT: [[ADD6:%.*]] = fadd fast half [[ELT6]], [[ADD5]]
-; VI-NEXT: [[ADD7:%.*]] = fadd fast half [[ELT7]], [[ADD6]]
-; VI-NEXT: ret half [[ADD7]]
+; GCN-LABEL: @reduction_half8(
+; GCN-NEXT: entry:
+; GCN-NEXT: [[TMP0:%.*]] = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH8000, <8 x half> [[VEC8:%.*]])
+; GCN-NEXT: ret half [[TMP0]]
;
entry:
%elt0 = extractelement <8 x half> %vec8, i64 0
@@ -86,15 +56,7 @@ define half @reduction_half16(<16 x half> %vec16) {
;
; VI-LABEL: @reduction_half16(
; VI-NEXT: entry:
-; VI-NEXT: [[ELT0:%.*]] = extractelement <16 x half> [[VEC16:%.*]], i64 0
-; VI-NEXT: [[ELT1:%.*]] = extractelement <16 x half> [[VEC16]], i64 1
-; VI-NEXT: [[ELT2:%.*]] = extractelement <16 x half> [[VEC16]], i64 2
-; VI-NEXT: [[ELT3:%.*]] = extractelement <16 x half> [[VEC16]], i64 3
-; VI-NEXT: [[ELT4:%.*]] = extractelement <16 x half> [[VEC16]], i64 4
-; VI-NEXT: [[ELT5:%.*]] = extractelement <16 x half> [[VEC16]], i64 5
-; VI-NEXT: [[ELT6:%.*]] = extractelement <16 x half> [[VEC16]], i64 6
-; VI-NEXT: [[ELT7:%.*]] = extractelement <16 x half> [[VEC16]], i64 7
-; VI-NEXT: [[ELT8:%.*]] = extractelement <16 x half> [[VEC16]], i64 8
+; VI-NEXT: [[ELT8:%.*]] = extractelement <16 x half> [[VEC16:%.*]], i64 8
; VI-NEXT: [[ELT9:%.*]] = extractelement <16 x half> [[VEC16]], i64 9
; VI-NEXT: [[ELT10:%.*]] = extractelement <16 x half> [[VEC16]], i64 10
; VI-NEXT: [[ELT11:%.*]] = extractelement <16 x half> [[VEC16]], i64 11
@@ -102,22 +64,17 @@ define half @reduction_half16(<16 x half> %vec16) {
; VI-NEXT: [[ELT13:%.*]] = extractelement <16 x half> [[VEC16]], i64 13
; VI-NEXT: [[ELT14:%.*]] = extractelement <16 x half> [[VEC16]], i64 14
; VI-NEXT: [[ELT15:%.*]] = extractelement <16 x half> [[VEC16]], i64 15
-; VI-NEXT: [[ADD1:%.*]] = fadd fast half [[ELT1]], [[ELT0]]
-; VI-NEXT: [[ADD2:%.*]] = fadd fast half [[ELT2]], [[ADD1]]
-; VI-NEXT: [[ADD3:%.*]] = fadd fast half [[ELT3]], [[ADD2]]
-; VI-NEXT: [[ADD4:%.*]] = fadd fast half [[ELT4]], [[ADD3]]
-; VI-NEXT: [[ADD5:%.*]] = fadd fast half [[ELT5]], [[ADD4]]
-; VI-NEXT: [[ADD6:%.*]] = fadd fast half [[ELT6]], [[ADD5]]
-; VI-NEXT: [[ADD7:%.*]] = fadd fast half [[ELT7]], [[ADD6]]
-; VI-NEXT: [[ADD8:%.*]] = fadd fast half [[ELT8]], [[ADD7]]
-; VI-NEXT: [[ADD9:%.*]] = fadd fast half [[ELT9]], [[ADD8]]
-; VI-NEXT: [[ADD10:%.*]] = fadd fast half [[ELT10]], [[ADD9]]
-; VI-NEXT: [[ADD11:%.*]] = fadd fast half [[ELT11]], [[ADD10]]
-; VI-NEXT: [[ADD12:%.*]] = fadd fast half [[ELT12]], [[ADD11]]
-; VI-NEXT: [[ADD13:%.*]] = fadd fast half [[ELT13]], [[ADD12]]
-; VI-NEXT: [[ADD14:%.*]] = fadd fast half [[ELT14]], [[ADD13]]
-; VI-NEXT: [[ADD15:%.*]] = fadd fast half [[ELT15]], [[ADD14]]
-; VI-NEXT: ret half [[ADD15]]
+; VI-NEXT: [[TMP0:%.*]] = shufflevector <16 x half> [[VEC16]], <16 x half> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+; VI-NEXT: [[TMP1:%.*]] = call fast half @llvm.vector.reduce.fadd.v8f16(half 0xH8000, <8 x half> [[TMP0]])
+; VI-NEXT: [[OP_RDX:%.*]] = fadd fast half [[TMP1]], [[ELT8]]
+; VI-NEXT: [[OP_RDX1:%.*]] = fadd fast half [[ELT9]], [[ELT10]]
+; VI-NEXT: [[OP_RDX2:%.*]] = fadd fast half [[ELT11]], [[ELT12]]
+; VI-NEXT: [[OP_RDX3:%.*]] = fadd fast half [[ELT13]], [[ELT14]]
+; VI-NEXT: [[OP_RDX4:%.*]] = fadd fast half [[OP_RDX]], [[OP_RDX1]]
+; VI-NEXT: [[OP_RDX5:%.*]] = fadd fast half [[OP_RDX2]], [[OP_RDX3]]
+; VI-NEXT: [[OP_RDX6:%.*]] = fadd fast half [[OP_RDX4]], [[OP_RDX5]]
+; VI-NEXT: [[OP_RDX7:%.*]] = fadd fast half [[OP_RDX6]], [[ELT15]]
+; VI-NEXT: ret half [[OP_RDX7]]
;
entry:
%elt0 = extractelement <16 x half> %vec16, i64 0
@@ -183,21 +140,10 @@ entry:
}
define i16 @reduction_v4i16(<4 x i16> %a) {
-; GFX9-LABEL: @reduction_v4i16(
-; GFX9-NEXT: entry:
-; GFX9-NEXT: [[TMP0:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[A:%.*]])
-; GFX9-NEXT: ret i16 [[TMP0]]
-;
-; VI-LABEL: @reduction_v4i16(
-; VI-NEXT: entry:
-; VI-NEXT: [[ELT0:%.*]] = extractelement <4 x i16> [[A:%.*]], i64 0
-; VI-NEXT: [[ELT1:%.*]] = extractelement <4 x i16> [[A]], i64 1
-; VI-NEXT: [[ELT2:%.*]] = extractelement <4 x i16> [[A]], i64 2
-; VI-NEXT: [[ELT3:%.*]] = extractelement <4 x i16> [[A]], i64 3
-; VI-NEXT: [[ADD1:%.*]] = add i16 [[ELT1]], [[ELT0]]
-; VI-NEXT: [[ADD2:%.*]] = add i16 [[ELT2]], [[ADD1]]
-; VI-NEXT: [[ADD3:%.*]] = add i16 [[ELT3]], [[ADD2]]
-; VI-NEXT: ret i16 [[ADD3]]
+; GCN-LABEL: @reduction_v4i16(
+; GCN-NEXT: entry:
+; GCN-NEXT: [[TMP0:%.*]] = call i16 @llvm.vector.reduce.add.v4i16(<4 x i16> [[A:%.*]])
+; GCN-NEXT: ret i16 [[TMP0]]
;
entry:
%elt0 = extractelement <4 x i16> %a, i64 0
@@ -213,29 +159,10 @@ entry:
}
define i16 @reduction_v8i16(<8 x i16> %vec8) {
-; GFX9-LABEL: @reduction_v8i16(
-; GFX9-NEXT: entry:
-; GFX9-NEXT: [[TMP0:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[VEC8:%.*]])
-; GFX9-NEXT: ret i16 [[TMP0]]
-;
-; VI-LABEL: @reduction_v8i16(
-; VI-NEXT: entry:
-; VI-NEXT: [[ELT0:%.*]] = extractelement <8 x i16> [[VEC8:%.*]], i64 0
-; VI-NEXT: [[ELT1:%.*]] = extractelement <8 x i16> [[VEC8]], i64 1
-; VI-NEXT: [[ELT2:%.*]] = extractelement <8 x i16> [[VEC8]], i64 2
-; VI-NEXT: [[ELT3:%.*]] = extractelement <8 x i16> [[VEC8]], i64 3
-; VI-NEXT: [[ELT4:%.*]] = extractelement <8 x i16> [[VEC8]], i64 4
-; VI-NEXT: [[ELT5:%.*]] = extractelement <8 x i16> [[VEC8]], i64 5
-; VI-NEXT: [[ELT6:%.*]] = extractelement <8 x i16> [[VEC8]], i64 6
-; VI-NEXT: [[ELT7:%.*]] = extractelement <8 x i16> [[VEC8]], i64 7
-; VI-NEXT: [[ADD1:%.*]] = add i16 [[ELT1]], [[ELT0]]
-; VI-NEXT: [[ADD2:%.*]] = add i16 [[ELT2]], [[ADD1]]
-; VI-NEXT: [[ADD3:%.*]] = add i16 [[ELT3]], [[ADD2]]
-; VI-NEXT: [[ADD4:%.*]] = add i16 [[ELT4]], [[ADD3]]
-; VI-NEXT: [[ADD5:%.*]] = add i16 [[ELT5]], [[ADD4]]
-; VI-NEXT: [[ADD6:%.*]] = add i16 [[ELT6]], [[ADD5]]
-; VI-NEXT: [[ADD7:%.*]] = add i16 [[ELT7]], [[ADD6]]
-; VI-NEXT: ret i16 [[ADD7]]
+; GCN-LABEL: @reduction_v8i16(
+; GCN-NEXT: entry:
+; GCN-NEXT: [[TMP0:%.*]] = call i16 @llvm.vector.reduce.add.v8i16(<8 x i16> [[VEC8:%.*]])
+; GCN-NEXT: ret i16 [[TMP0]]
;
entry:
%elt0 = extractelement <8 x i16> %vec8, i64 0
diff --git a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll
index a7ca5b93c361..dd12c98af696 100644
--- a/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll
+++ b/llvm/test/Transforms/SeparateConstOffsetFromGEP/AMDGPU/reorder-gep.ll
@@ -288,8 +288,8 @@ entry:
define void @multiple_index_maybe_neg(ptr %in.ptr, i64 %in.idx1) {
; CHECK-LABEL: define void @multiple_index_maybe_neg(
; CHECK-SAME: ptr [[IN_PTR:%.*]], i64 [[IN_IDX1:%.*]]) #[[ATTR0]] {
-; CHECK-NEXT: [[CONST1:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[IN_PTR]], i64 0, i64 1
-; CHECK-NEXT: [[IDX1:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[CONST1]], i64 0, i64 [[IN_IDX1]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr [2 x <2 x i8>], ptr [[IN_PTR]], i64 0, i64 [[IN_IDX1]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr [2 x <2 x i8>], ptr [[TMP1]], i64 0, i64 1
; CHECK-NEXT: ret void
;
%const1 = getelementptr inbounds [2 x <2 x i8>], ptr %in.ptr, i64 0, i64 1
@@ -301,8 +301,8 @@ define void @multiple_index_nonneg(ptr %in.ptr, i64 %in.idx1) {
; CHECK-LABEL: define void @multiple_index_nonneg(
; CHECK-SAME: ptr [[IN_PTR:%.*]], i64 [[IN_IDX1:%.*]]) #[[ATTR0]] {
; CHECK-NEXT: [[IN_IDX1_NNEG:%.*]] = and i64 [[IN_IDX1]], 9223372036854775807
-; CHECK-NEXT: [[CONST1:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[IN_PTR]], i64 0, i64 1
-; CHECK-NEXT: [[IDX1:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[CONST1]], i64 0, i64 [[IN_IDX1_NNEG]]
+; CHECK-NEXT: [[TMP1:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[IN_PTR]], i64 0, i64 [[IN_IDX1_NNEG]]
+; CHECK-NEXT: [[TMP2:%.*]] = getelementptr inbounds [2 x <2 x i8>], ptr [[TMP1]], i64 0, i64 1
; CHECK-NEXT: ret void
;
%in.idx1.nneg = and i64 %in.idx1, 9223372036854775807
diff --git a/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll b/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll
index bb333941abf7..7dadeb5d72de 100644
--- a/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll
+++ b/llvm/test/Transforms/VectorCombine/AArch64/shuffletoidentity.ll
@@ -13,8 +13,21 @@ define <8 x i8> @trivial(<8 x i8> %a) {
ret <8 x i8> %r
}
-define <8 x i8> @add(<8 x i8> %a, <8 x i8> %b) {
-; CHECK-LABEL: @add(
+define <4 x i32> @add_same_operands(<4 x i32> %x) {
+; CHECK-LABEL: @add_same_operands(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ADD:%.*]] = add <4 x i32> [[SHUF]], [[SHUF]]
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[ADD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i32> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %add = add <4 x i32> %shuf, %shuf
+ %revshuf = shufflevector <4 x i32> %add, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %revshuf
+}
+
+define <8 x i8> @add_different_operands(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: @add_different_operands(
; CHECK-NEXT: [[R:%.*]] = add <8 x i8> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT: ret <8 x i8> [[R]]
;
@@ -317,6 +330,23 @@ define <8 x i8> @constantdiff2(<8 x i8> %a) {
ret <8 x i8> %r
}
+define <8 x half> @constantsplatf(<8 x half> %a) {
+; CHECK-LABEL: @constantsplatf(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x half> [[A:%.*]], <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x half> [[A]], <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[ABT:%.*]] = fadd <4 x half> [[AT]], <half 0xH4900, half 0xH4900, half 0xH4900, half 0xH4900>
+; CHECK-NEXT: [[ABB:%.*]] = fadd <4 x half> [[AB]], <half 0xH4900, half 0xH4900, half 0xH4900, half 0xH4900>
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x half> [[ABT]], <4 x half> [[ABB]], <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <8 x half> [[R]]
+;
+ %ab = shufflevector <8 x half> %a, <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %at = shufflevector <8 x half> %a, <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %abt = fadd <4 x half> %at, <half 10.0, half 10.0, half 10.0, half 10.0>
+ %abb = fadd <4 x half> %ab, <half 10.0, half 10.0, half 10.0, half 10.0>
+ %r = shufflevector <4 x half> %abt, <4 x half> %abb, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x half> %r
+}
+
define <8 x i8> @inner_shuffle(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
; CHECK-LABEL: @inner_shuffle(
; CHECK-NEXT: [[TMP1:%.*]] = shufflevector <8 x i8> [[C:%.*]], <8 x i8> poison, <8 x i32> zeroinitializer
@@ -339,8 +369,23 @@ define <8 x i8> @inner_shuffle(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c) {
ret <8 x i8> %r
}
-define <8 x i8> @extrause_add(<8 x i8> %a, <8 x i8> %b) {
-; CHECK-LABEL: @extrause_add(
+define <4 x i32> @extrause_add_same_operands(<4 x i32> %x) {
+; CHECK-LABEL: @extrause_add_same_operands(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ADD:%.*]] = add <4 x i32> [[SHUF]], [[SHUF]]
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[ADD]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ADD2:%.*]] = add <4 x i32> [[SHUF]], [[REVSHUF]]
+; CHECK-NEXT: ret <4 x i32> [[ADD2]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %add = add <4 x i32> %shuf, %shuf
+ %revshuf = shufflevector <4 x i32> %add, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %add2 = add <4 x i32> %shuf, %revshuf
+ ret <4 x i32> %add2
+}
+
+define <8 x i8> @extrause_add_different_operands(<8 x i8> %a, <8 x i8> %b) {
+; CHECK-LABEL: @extrause_add_different_operands(
; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i8> [[A:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
; CHECK-NEXT: [[BB:%.*]] = shufflevector <8 x i8> [[B:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
@@ -413,6 +458,72 @@ define <8 x i8> @icmpsel(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
ret <8 x i8> %r
}
+define <8 x i8> @icmpsel_diffentcond(<8 x i8> %a, <8 x i8> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: @icmpsel_diffentcond(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i8> [[A:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x i8> [[A]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[BB:%.*]] = shufflevector <8 x i8> [[B:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[BT:%.*]] = shufflevector <8 x i8> [[B]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[CB:%.*]] = shufflevector <8 x i8> [[C:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[CT:%.*]] = shufflevector <8 x i8> [[C]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[DB:%.*]] = shufflevector <8 x i8> [[D:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[DT:%.*]] = shufflevector <8 x i8> [[D]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[ABT1:%.*]] = icmp slt <4 x i8> [[AT]], [[BT]]
+; CHECK-NEXT: [[ABB1:%.*]] = icmp ult <4 x i8> [[AB]], [[BB]]
+; CHECK-NEXT: [[ABT:%.*]] = select <4 x i1> [[ABT1]], <4 x i8> [[CT]], <4 x i8> [[DT]]
+; CHECK-NEXT: [[ABB:%.*]] = select <4 x i1> [[ABB1]], <4 x i8> [[CB]], <4 x i8> [[DB]]
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x i8> [[ABT]], <4 x i8> [[ABB]], <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <8 x i8> [[R]]
+;
+ %ab = shufflevector <8 x i8> %a, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %at = shufflevector <8 x i8> %a, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %bb = shufflevector <8 x i8> %b, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %bt = shufflevector <8 x i8> %b, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %cb = shufflevector <8 x i8> %c, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %ct = shufflevector <8 x i8> %c, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %db = shufflevector <8 x i8> %d, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %dt = shufflevector <8 x i8> %d, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %abt1 = icmp slt <4 x i8> %at, %bt
+ %abb1 = icmp ult <4 x i8> %ab, %bb
+ %abt = select <4 x i1> %abt1, <4 x i8> %ct, <4 x i8> %dt
+ %abb = select <4 x i1> %abb1, <4 x i8> %cb, <4 x i8> %db
+ %r = shufflevector <4 x i8> %abt, <4 x i8> %abb, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i8> %r
+}
+
+define <8 x i8> @fcmpsel(<8 x half> %a, <8 x half> %b, <8 x i8> %c, <8 x i8> %d) {
+; CHECK-LABEL: @fcmpsel(
+; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x half> [[A:%.*]], <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[AT:%.*]] = shufflevector <8 x half> [[A]], <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[BB:%.*]] = shufflevector <8 x half> [[B:%.*]], <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[BT:%.*]] = shufflevector <8 x half> [[B]], <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[CB:%.*]] = shufflevector <8 x i8> [[C:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[CT:%.*]] = shufflevector <8 x i8> [[C]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[DB:%.*]] = shufflevector <8 x i8> [[D:%.*]], <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[DT:%.*]] = shufflevector <8 x i8> [[D]], <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+; CHECK-NEXT: [[ABT1:%.*]] = fcmp olt <4 x half> [[AT]], [[BT]]
+; CHECK-NEXT: [[ABB1:%.*]] = fcmp olt <4 x half> [[AB]], [[BB]]
+; CHECK-NEXT: [[ABT:%.*]] = select <4 x i1> [[ABT1]], <4 x i8> [[CT]], <4 x i8> [[DT]]
+; CHECK-NEXT: [[ABB:%.*]] = select <4 x i1> [[ABB1]], <4 x i8> [[CB]], <4 x i8> [[DB]]
+; CHECK-NEXT: [[R:%.*]] = shufflevector <4 x i8> [[ABT]], <4 x i8> [[ABB]], <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <8 x i8> [[R]]
+;
+ %ab = shufflevector <8 x half> %a, <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %at = shufflevector <8 x half> %a, <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %bb = shufflevector <8 x half> %b, <8 x half> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %bt = shufflevector <8 x half> %b, <8 x half> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %cb = shufflevector <8 x i8> %c, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %ct = shufflevector <8 x i8> %c, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %db = shufflevector <8 x i8> %d, <8 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %dt = shufflevector <8 x i8> %d, <8 x i8> poison, <4 x i32> <i32 7, i32 6, i32 5, i32 4>
+ %abt1 = fcmp olt <4 x half> %at, %bt
+ %abb1 = fcmp olt <4 x half> %ab, %bb
+ %abt = select <4 x i1> %abt1, <4 x i8> %ct, <4 x i8> %dt
+ %abb = select <4 x i1> %abb1, <4 x i8> %cb, <4 x i8> %db
+ %r = shufflevector <4 x i8> %abt, <4 x i8> %abb, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
+ ret <8 x i8> %r
+}
+
define <8 x half> @fma(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
; CHECK-LABEL: @fma(
; CHECK-NEXT: [[R:%.*]] = call <8 x half> @llvm.fma.v8f16(<8 x half> [[A:%.*]], <8 x half> [[B:%.*]], <8 x half> [[C:%.*]])
@@ -430,6 +541,63 @@ define <8 x half> @fma(<8 x half> %a, <8 x half> %b, <8 x half> %c) {
ret <8 x half> %r
}
+define <4 x i64> @single_zext(<4 x i32> %x) {
+; CHECK-LABEL: @single_zext(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[SHUF]] to <4 x i64>
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ZEXT]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %zext = zext <4 x i32> %shuf to <4 x i64>
+ %revshuf = shufflevector <4 x i64> %zext, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i64> %revshuf
+}
+
+define <4 x i64> @not_zext(<4 x i32> %x) {
+; CHECK-LABEL: @not_zext(
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[X:%.*]] to <4 x i64>
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ZEXT]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
+;
+ %zext = zext <4 x i32> %x to <4 x i64>
+ %revshuf = shufflevector <4 x i64> %zext, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i64> %revshuf
+}
+
+define <4 x i64> @not_zext2(<4 x i32> %x) {
+; CHECK-LABEL: @not_zext2(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[SHUF]] to <4 x i64>
+; CHECK-NEXT: ret <4 x i64> [[ZEXT]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %zext = zext <4 x i32> %shuf to <4 x i64>
+ ret <4 x i64> %zext
+}
+
+define i32 @not_bitcast(<4 x i8> %x) {
+; CHECK-LABEL: @not_bitcast(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i8> [[X:%.*]], <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[BITCAST:%.*]] = bitcast <4 x i8> [[SHUF]] to i32
+; CHECK-NEXT: ret i32 [[BITCAST]]
+;
+ %shuf = shufflevector <4 x i8> %x, <4 x i8> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %bitcast = bitcast <4 x i8> %shuf to i32
+ ret i32 %bitcast
+}
+
+define <8 x i16> @not_bitcast2(<4 x i32> %x, <8 x i16> %y) {
+; CHECK-LABEL: @not_bitcast2(
+; CHECK-NEXT: [[CAST:%.*]] = bitcast <4 x i32> [[X:%.*]] to <8 x i16>
+; CHECK-NEXT: [[OUT:%.*]] = shufflevector <8 x i16> [[Y:%.*]], <8 x i16> [[CAST]], <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+; CHECK-NEXT: ret <8 x i16> [[OUT]]
+;
+ %cast = bitcast <4 x i32> %x to <8 x i16>
+ %out = shufflevector <8 x i16> %y, <8 x i16> %cast, <8 x i32> <i32 8, i32 1, i32 10, i32 3, i32 12, i32 5, i32 14, i32 7>
+ ret <8 x i16> %out
+}
+
define void @exttrunc(<8 x i32> %a, <8 x i32> %b, ptr %p) {
; CHECK-LABEL: @exttrunc(
; CHECK-NEXT: [[AB:%.*]] = shufflevector <8 x i32> [[A:%.*]], <8 x i32> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
@@ -584,6 +752,51 @@ define void @trunc(<8 x i64> %a, <8 x i64> %b, ptr %p) {
ret void
}
+define <4 x i64> @zext_chain(<4 x i16> %x) {
+; CHECK-LABEL: @zext_chain(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i16> [[X:%.*]], <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i16> [[SHUF]] to <4 x i32>
+; CHECK-NEXT: [[SEXT:%.*]] = sext <4 x i32> [[ZEXT]] to <4 x i64>
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[SEXT]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i16> %x, <4 x i16> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %zext = zext <4 x i16> %shuf to <4 x i32>
+ %sext = sext <4 x i32> %zext to <4 x i64>
+ %revshuf = shufflevector <4 x i64> %sext, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i64> %revshuf
+}
+
+define <4 x i32> @add_chain(<4 x i32> %x) {
+; CHECK-LABEL: @add_chain(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ADD:%.*]] = add <4 x i32> [[SHUF]], [[SHUF]]
+; CHECK-NEXT: [[ADD2:%.*]] = add <4 x i32> [[ADD]], [[ADD]]
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i32> [[ADD2]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i32> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %add = add <4 x i32> %shuf, %shuf
+ %add2 = add <4 x i32> %add, %add
+ %revshuf = shufflevector <4 x i32> %add2, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i32> %revshuf
+}
+
+define <4 x i64> @zext_add_chain(<4 x i32> %x) {
+; CHECK-LABEL: @zext_add_chain(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <4 x i32> [[SHUF]] to <4 x i64>
+; CHECK-NEXT: [[ADD:%.*]] = add <4 x i64> [[ZEXT]], [[ZEXT]]
+; CHECK-NEXT: [[REVSHUF:%.*]] = shufflevector <4 x i64> [[ADD]], <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+; CHECK-NEXT: ret <4 x i64> [[REVSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ %zext = zext <4 x i32> %shuf to <4 x i64>
+ %add = add <4 x i64> %zext, %zext
+ %revshuf = shufflevector <4 x i64> %add, <4 x i64> poison, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
+ ret <4 x i64> %revshuf
+}
+
define <8 x i8> @intrinsics_minmax(<8 x i8> %a, <8 x i8> %b) {
; CHECK-LABEL: @intrinsics_minmax(
; CHECK-NEXT: [[TMP1:%.*]] = call <8 x i8> @llvm.smin.v8i8(<8 x i8> [[A:%.*]], <8 x i8> [[B:%.*]])
@@ -764,5 +977,33 @@ define <4 x i8> @singleop(<4 x i8> %a, <4 x i8> %b) {
ret <4 x i8> %r
}
+define <4 x i64> @cast_mismatched_types(<4 x i32> %x) {
+; CHECK-LABEL: @cast_mismatched_types(
+; CHECK-NEXT: [[SHUF:%.*]] = shufflevector <4 x i32> [[X:%.*]], <4 x i32> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[ZEXT:%.*]] = zext <2 x i32> [[SHUF]] to <2 x i64>
+; CHECK-NEXT: [[EXTSHUF:%.*]] = shufflevector <2 x i64> [[ZEXT]], <2 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: ret <4 x i64> [[EXTSHUF]]
+;
+ %shuf = shufflevector <4 x i32> %x, <4 x i32> poison, <2 x i32> <i32 0, i32 2>
+ %zext = zext <2 x i32> %shuf to <2 x i64>
+ %extshuf = shufflevector <2 x i64> %zext, <2 x i64> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ ret <4 x i64> %extshuf
+}
+
+define <4 x float> @fadd_mismatched_types(<4 x float> %x, <4 x float> %y) {
+; CHECK-LABEL: @fadd_mismatched_types(
+; CHECK-NEXT: [[SHUF_X:%.*]] = shufflevector <4 x float> [[X:%.*]], <4 x float> poison, <2 x i32> <i32 0, i32 2>
+; CHECK-NEXT: [[SHUF_Y:%.*]] = shufflevector <4 x float> [[Y:%.*]], <4 x float> poison, <2 x i32> <i32 1, i32 3>
+; CHECK-NEXT: [[FADD:%.*]] = fadd fast <2 x float> [[SHUF_X]], [[SHUF_Y]]
+; CHECK-NEXT: [[EXTSHUF:%.*]] = shufflevector <2 x float> [[FADD]], <2 x float> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+; CHECK-NEXT: ret <4 x float> [[EXTSHUF]]
+;
+ %shuf.x = shufflevector <4 x float> %x, <4 x float> poison, <2 x i32> <i32 0, i32 2>
+ %shuf.y = shufflevector <4 x float> %y, <4 x float> poison, <2 x i32> <i32 1, i32 3>
+ %fadd = fadd fast <2 x float> %shuf.x, %shuf.y
+ %extshuf = shufflevector <2 x float> %fadd, <2 x float> poison, <4 x i32> <i32 0, i32 2, i32 1, i32 3>
+ ret <4 x float> %extshuf
+}
+
declare void @use(<4 x i8>)
diff --git a/llvm/test/tools/llvm-driver/symlink-call.test b/llvm/test/tools/llvm-driver/symlink-call.test
index eeedf9edc73f..ca6098216b13 100644
--- a/llvm/test/tools/llvm-driver/symlink-call.test
+++ b/llvm/test/tools/llvm-driver/symlink-call.test
@@ -14,6 +14,8 @@
# RUN: %t/cxxfilt-15 --help | FileCheck %s
# RUN: ln -s %llvm %t/cxxfilt-15.exe
# RUN: %t/cxxfilt-15.exe --help | FileCheck %s
+# RUN: ln -s %llvm %t/c++filt
+# RUN: %t/c++filt --help | FileCheck %s
# RUN: ln -s %llvm %t/llvm-15
# RUN: %t/llvm-15 cxxfilt --help | FileCheck %s
diff --git a/llvm/tools/llvm-cxxfilt/CMakeLists.txt b/llvm/tools/llvm-cxxfilt/CMakeLists.txt
index cbc4c2db6154..a644baffdd90 100644
--- a/llvm/tools/llvm-cxxfilt/CMakeLists.txt
+++ b/llvm/tools/llvm-cxxfilt/CMakeLists.txt
@@ -17,6 +17,10 @@ add_llvm_tool(llvm-cxxfilt
GENERATE_DRIVER
)
+if(LLVM_TOOL_LLVM_DRIVER_BUILD)
+ set_property(GLOBAL APPEND PROPERTY LLVM_DRIVER_HIDDEN_TOOL_ALIASES_llvm-cxxfilt c++filt)
+endif()
+
if(LLVM_INSTALL_BINUTILS_SYMLINKS)
add_llvm_tool_symlink(c++filt llvm-cxxfilt)
endif()
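
For context on the hidden `c++filt` alias above and the new symlink RUN lines, here is a minimal, self-contained sketch of argv[0]-based tool dispatch. It is illustrative only and not the llvm driver's actual code; every name in it (cxxfiltMain, the alias check) is made up for the example.

// Sketch: dispatch on the basename of argv[0], which is what lets a plain
// `c++filt` symlink reach the llvm-cxxfilt entry point. Assumption: the real
// driver builds its alias table from the CMake property set above.
#include <iostream>
#include <string>

static int cxxfiltMain(int, char **) {
  std::cout << "llvm-cxxfilt invoked\n";
  return 0;
}

int main(int argc, char **argv) {
  std::string Tool = argv[0];
  if (std::string::size_type Pos = Tool.find_last_of("/\\");
      Pos != std::string::npos)
    Tool = Tool.substr(Pos + 1);
  // Hidden aliases map alternative spellings onto the same tool entry point.
  if (Tool == "c++filt" || Tool.find("cxxfilt") != std::string::npos)
    return cxxfiltMain(argc, argv);
  std::cerr << "unknown tool name: " << Tool << "\n";
  return 1;
}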
diff --git a/llvm/tools/llvm-link/llvm-link.cpp b/llvm/tools/llvm-link/llvm-link.cpp
index 7794f2d81ed0..1b90fce76fbd 100644
--- a/llvm/tools/llvm-link/llvm-link.cpp
+++ b/llvm/tools/llvm-link/llvm-link.cpp
@@ -377,9 +377,13 @@ static bool importFunctions(const char *argv0, Module &DestModule) {
if (Verbose)
errs() << "Importing " << FunctionName << " from " << FileName << "\n";
+ // `-import` specifies the `<filename,function-name>` pairs to import as
+ // definitions, so record the import type as a definition directly.
+ // FIXME: A follow-up patch should add test coverage for importing
+ // declarations via the `llvm-link` CLI (e.g., by introducing a new command
+ // line option).
auto &Entry =
ImportList[FileNameStringCache.insert(FileName).first->getKey()];
- Entry.insert(F->getGUID());
+ Entry[F->getGUID()] = GlobalValueSummary::Definition;
}
auto CachedModuleLoader = [&](StringRef Identifier) {
return ModuleLoaderCache.takeModule(std::string(Identifier));
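
As a rough illustration of the data-structure change above (a per-file set of GUIDs becoming a per-file map from GUID to import kind), here is a self-contained sketch; the type and enum names are invented for the example and are not the llvm-link types.

// Hypothetical stand-in for the import list: file name -> (GUID -> kind).
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

enum class ImportKind { Declaration, Definition };
using ImportListSketch =
    std::map<std::string, std::map<std::uint64_t, ImportKind>>;

int main() {
  ImportListSketch Imports;
  // `-import=file,function` pairs are recorded directly as definitions.
  Imports["foo.bc"][0x1234] = ImportKind::Definition;
  for (const auto &[File, Funcs] : Imports)
    for (const auto &[GUID, Kind] : Funcs)
      std::cout << File << ": guid " << GUID << " -> "
                << (Kind == ImportKind::Definition ? "definition"
                                                   : "declaration")
                << "\n";
  return 0;
}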
diff --git a/llvm/tools/llvm-profdata/llvm-profdata.cpp b/llvm/tools/llvm-profdata/llvm-profdata.cpp
index 4126b55576dd..693af066bc0f 100644
--- a/llvm/tools/llvm-profdata/llvm-profdata.cpp
+++ b/llvm/tools/llvm-profdata/llvm-profdata.cpp
@@ -75,7 +75,6 @@ cl::SubCommand MergeSubcommand(
namespace {
enum ProfileKinds { instr, sample, memory };
enum FailureMode { warnOnly, failIfAnyAreInvalid, failIfAllAreInvalid };
-} // namespace
enum ProfileFormat {
PF_None = 0,
@@ -87,6 +86,7 @@ enum ProfileFormat {
};
enum class ShowFormat { Text, Json, Yaml };
+} // namespace
// Common options.
cl::opt<std::string> OutputFilename("output", cl::value_desc("output"),
@@ -443,8 +443,7 @@ cl::opt<bool> ShowProfileVersion("profile-version", cl::init(false),
// multiple static functions map to the same name.
const std::string DuplicateNameStr = "----";
-static void warn(Twine Message, std::string Whence = "",
- std::string Hint = "") {
+static void warn(Twine Message, StringRef Whence = "", StringRef Hint = "") {
WithColor::warning();
if (!Whence.empty())
errs() << Whence << ": ";
@@ -456,13 +455,13 @@ static void warn(Twine Message, std::string Whence = "",
static void warn(Error E, StringRef Whence = "") {
if (E.isA<InstrProfError>()) {
handleAllErrors(std::move(E), [&](const InstrProfError &IPE) {
- warn(IPE.message(), std::string(Whence), std::string(""));
+ warn(IPE.message(), Whence);
});
}
}
-static void exitWithError(Twine Message, std::string Whence = "",
- std::string Hint = "") {
+static void exitWithError(Twine Message, StringRef Whence = "",
+ StringRef Hint = "") {
WithColor::error();
if (!Whence.empty())
errs() << Whence << ": ";
@@ -481,16 +480,16 @@ static void exitWithError(Error E, StringRef Whence = "") {
// Hint in case user missed specifying the profile type.
Hint = "Perhaps you forgot to use the --sample or --memory option?";
}
- exitWithError(IPE.message(), std::string(Whence), std::string(Hint));
+ exitWithError(IPE.message(), Whence, Hint);
});
return;
}
- exitWithError(toString(std::move(E)), std::string(Whence));
+ exitWithError(toString(std::move(E)), Whence);
}
static void exitWithErrorCode(std::error_code EC, StringRef Whence = "") {
- exitWithError(EC.message(), std::string(Whence));
+ exitWithError(EC.message(), Whence);
}
static void warnOrExitGivenError(FailureMode FailMode, std::error_code EC,
@@ -498,7 +497,7 @@ static void warnOrExitGivenError(FailureMode FailMode, std::error_code EC,
if (FailMode == failIfAnyAreInvalid)
exitWithErrorCode(EC, Whence);
else
- warn(EC.message(), std::string(Whence));
+ warn(EC.message(), Whence);
}
static void handleMergeWriterError(Error E, StringRef WhenceFile = "",
@@ -1585,7 +1584,7 @@ static void mergeSampleProfile(const WeightedFileVector &Inputs,
// If OutputSizeLimit is 0 (default), it is the same as write().
if (std::error_code EC =
Writer->writeWithSizeLimit(ProfileMap, OutputSizeLimit))
- exitWithErrorCode(std::move(EC));
+ exitWithErrorCode(EC);
}
static WeightedFile parseWeightedFile(const StringRef &WeightedFilename) {
diff --git a/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp b/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
index 4cb76f434742..06ac98b0c5e1 100644
--- a/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
+++ b/llvm/tools/llvm-rtdyld/llvm-rtdyld.cpp
@@ -649,9 +649,9 @@ void applySpecificSectionMappings(RuntimeDyld &Dyld,
const FileToSectionIDMap &FileToSecIDMap) {
for (StringRef Mapping : SpecificSectionMappings) {
- size_t EqualsIdx = Mapping.find_first_of("=");
+ size_t EqualsIdx = Mapping.find_first_of('=');
std::string SectionIDStr = std::string(Mapping.substr(0, EqualsIdx));
- size_t ComaIdx = Mapping.find_first_of(",");
+ size_t ComaIdx = Mapping.find_first_of(',');
if (ComaIdx == StringRef::npos)
report_fatal_error("Invalid section specification '" + Mapping +
diff --git a/llvm/unittests/Analysis/ValueTrackingTest.cpp b/llvm/unittests/Analysis/ValueTrackingTest.cpp
index 8738af91b652..a30db468c772 100644
--- a/llvm/unittests/Analysis/ValueTrackingTest.cpp
+++ b/llvm/unittests/Analysis/ValueTrackingTest.cpp
@@ -2005,7 +2005,7 @@ TEST_F(ComputeKnownFPClassTest, SqrtNszSignBit) {
computeKnownFPClass(A4, M->getDataLayout(), fcAllFlags, 0, nullptr,
nullptr, nullptr, nullptr, /*UseInstrInfo=*/true);
EXPECT_EQ(fcPositive | fcQNan, UseInstrInfoNSZNoNan.KnownFPClasses);
- EXPECT_EQ(false, UseInstrInfoNSZNoNan.SignBit);
+ EXPECT_EQ(std::nullopt, UseInstrInfoNSZNoNan.SignBit);
KnownFPClass NoUseInstrInfoNSZNoNan =
computeKnownFPClass(A4, M->getDataLayout(), fcAllFlags, 0, nullptr,
diff --git a/llvm/unittests/IR/MDBuilderTest.cpp b/llvm/unittests/IR/MDBuilderTest.cpp
index 2b5ab81b6066..4656c70ce9ca 100644
--- a/llvm/unittests/IR/MDBuilderTest.cpp
+++ b/llvm/unittests/IR/MDBuilderTest.cpp
@@ -127,4 +127,43 @@ TEST_F(MDBuilderTest, createPCSections) {
EXPECT_EQ(mdconst::extract<ConstantInt>(Aux->getOperand(1))->getValue(),
C2->getValue());
}
+TEST_F(MDBuilderTest, createCallbackAndMerge) {
+ MDBuilder MDHelper(Context);
+ auto *CB1 = MDHelper.createCallbackEncoding(0, {1, -1}, false);
+ auto *CB2 = MDHelper.createCallbackEncoding(2, {-1}, false);
+ ASSERT_EQ(CB1->getNumOperands(), 4U);
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB1->getOperand(0)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB1->getOperand(1)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB1->getOperand(2)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB1->getOperand(3)));
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB1->getOperand(0))->getValue(), 0);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB1->getOperand(1))->getValue(), 1);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB1->getOperand(2))->getValue(), -1);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB1->getOperand(3))->getValue(),
+ false);
+ ASSERT_EQ(CB2->getNumOperands(), 3U);
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB2->getOperand(0)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB2->getOperand(1)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB2->getOperand(2)));
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB2->getOperand(0))->getValue(), 2);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB2->getOperand(1))->getValue(), -1);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB2->getOperand(2))->getValue(),
+ false);
+ auto *CBList = MDNode::get(Context, {CB1, CB2});
+ auto *CB3 = MDHelper.createCallbackEncoding(4, {5}, false);
+ auto *NewCBList = MDHelper.mergeCallbackEncodings(CBList, CB3);
+ ASSERT_EQ(NewCBList->getNumOperands(), 3U);
+ EXPECT_TRUE(NewCBList->getOperand(0) == CB1);
+ EXPECT_TRUE(NewCBList->getOperand(1) == CB2);
+ EXPECT_TRUE(NewCBList->getOperand(2) == CB3);
+
+ ASSERT_EQ(CB3->getNumOperands(), 3U);
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB3->getOperand(0)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB3->getOperand(1)));
+ ASSERT_TRUE(isa<ConstantAsMetadata>(CB3->getOperand(2)));
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB3->getOperand(0))->getValue(), 4);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB3->getOperand(1))->getValue(), 5);
+ EXPECT_EQ(mdconst::extract<ConstantInt>(CB3->getOperand(2))->getValue(),
+ false);
+}
} // namespace
diff --git a/llvm/unittests/Support/raw_socket_stream_test.cpp b/llvm/unittests/Support/raw_socket_stream_test.cpp
index a8536228666d..c4e8cfbbe7e6 100644
--- a/llvm/unittests/Support/raw_socket_stream_test.cpp
+++ b/llvm/unittests/Support/raw_socket_stream_test.cpp
@@ -7,7 +7,6 @@
#include "llvm/Testing/Support/Error.h"
#include "gtest/gtest.h"
#include <future>
-#include <iostream>
#include <stdlib.h>
#include <thread>
@@ -86,13 +85,8 @@ TEST(raw_socket_streamTest, TIMEOUT_PROVIDED) {
std::chrono::milliseconds Timeout = std::chrono::milliseconds(100);
Expected<std::unique_ptr<raw_socket_stream>> MaybeServer =
ServerListener.accept(Timeout);
-
- ASSERT_THAT_EXPECTED(MaybeServer, Failed());
- llvm::Error Err = MaybeServer.takeError();
- llvm::handleAllErrors(std::move(Err), [&](const llvm::StringError &SE) {
- std::error_code EC = SE.convertToErrorCode();
- ASSERT_EQ(EC, std::errc::timed_out);
- });
+ ASSERT_EQ(llvm::errorToErrorCode(MaybeServer.takeError()),
+ std::errc::timed_out);
}
TEST(raw_socket_streamTest, FILE_DESCRIPTOR_CLOSED) {
@@ -122,12 +116,7 @@ TEST(raw_socket_streamTest, FILE_DESCRIPTOR_CLOSED) {
// Wait for the CloseThread to finish
CloseThread.join();
-
- ASSERT_THAT_EXPECTED(MaybeServer, Failed());
- llvm::Error Err = MaybeServer.takeError();
- llvm::handleAllErrors(std::move(Err), [&](const llvm::StringError &SE) {
- std::error_code EC = SE.convertToErrorCode();
- ASSERT_EQ(EC, std::errc::operation_canceled);
- });
+ ASSERT_EQ(llvm::errorToErrorCode(MaybeServer.takeError()),
+ std::errc::operation_canceled);
}
} // namespace
diff --git a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
index 22fe31809319..df4066980093 100644
--- a/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVISAInfoTest.cpp
@@ -934,7 +934,9 @@ R"(All available -march extensions for RISC-V
zmmul 1.0
za128rs 1.0
za64rs 1.0
+ zaamo 1.0
zacas 1.0
+ zalrsc 1.0
zama16b 1.0
zawrs 1.0
zfa 1.0
@@ -1060,10 +1062,8 @@ R"(All available -march extensions for RISC-V
Experimental extensions
zicfilp 0.4 This is a long dummy description
zicfiss 0.4
- zaamo 0.2
zabha 1.0
zalasr 0.1
- zalrsc 0.2
zfbfmin 1.0
ztso 0.1
zvfbfmin 1.0
diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
index 88d353e89a46..709aa00ae8b3 100644
--- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.cpp
@@ -903,7 +903,7 @@ TreePredicateFn::TreePredicateFn(TreePattern *N) : PatFragRec(N) {
}
bool TreePredicateFn::hasPredCode() const {
- return isLoad() || isStore() || isAtomic() || hasNoUse() ||
+ return isLoad() || isStore() || isAtomic() || hasNoUse() || hasOneUse() ||
!PatFragRec->getRecord()->getValueAsString("PredicateCode").empty();
}
@@ -1140,6 +1140,8 @@ std::string TreePredicateFn::getPredCode() const {
if (hasNoUse())
Code += "if (!SDValue(N, 0).use_empty()) return false;\n";
+ if (hasOneUse())
+ Code += "if (!SDValue(N, 0).hasOneUse()) return false;\n";
std::string PredicateCode =
std::string(PatFragRec->getRecord()->getValueAsString("PredicateCode"));
@@ -1187,6 +1189,9 @@ bool TreePredicateFn::usesOperands() const {
bool TreePredicateFn::hasNoUse() const {
return isPredefinedPredicateEqualTo("HasNoUse", true);
}
+bool TreePredicateFn::hasOneUse() const {
+ return isPredefinedPredicateEqualTo("HasOneUse", true);
+}
bool TreePredicateFn::isLoad() const {
return isPredefinedPredicateEqualTo("IsLoad", true);
}
diff --git a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
index 7f94db0b7d5d..1f4d45d81fd3 100644
--- a/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
+++ b/llvm/utils/TableGen/Common/CodeGenDAGPatterns.h
@@ -533,6 +533,8 @@ public:
// Check if the HasNoUse predicate is set.
bool hasNoUse() const;
+ // Check if the HasOneUse predicate is set.
+ bool hasOneUse() const;
// Is the desired predefined predicate for a load?
bool isLoad() const;
diff --git a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
index 5fe3f9a32c01..edddc051c162 100644
--- a/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
+++ b/llvm/utils/TableGen/Common/GlobalISel/GlobalISelMatchTable.h
@@ -806,6 +806,7 @@ public:
IPM_MemoryAlignment,
IPM_VectorSplatImm,
IPM_NoUse,
+ IPM_OneUse,
IPM_GenericPredicate,
IPM_MIFlags,
OPM_SameOperand,
@@ -1691,6 +1692,28 @@ public:
}
};
+/// Generates code to check that the first result has only one use.
+class OneUsePredicateMatcher : public InstructionPredicateMatcher {
+public:
+ OneUsePredicateMatcher(unsigned InsnVarID)
+ : InstructionPredicateMatcher(IPM_OneUse, InsnVarID) {}
+
+ static bool classof(const PredicateMatcher *P) {
+ return P->getKind() == IPM_OneUse;
+ }
+
+ bool isIdentical(const PredicateMatcher &B) const override {
+ return InstructionPredicateMatcher::isIdentical(B);
+ }
+
+ void emitPredicateOpcodes(MatchTable &Table,
+ RuleMatcher &Rule) const override {
+ Table << MatchTable::Opcode("GIM_CheckHasOneUse")
+ << MatchTable::Comment("MI") << MatchTable::ULEB128Value(InsnVarID)
+ << MatchTable::LineBreak;
+ }
+};
+
/// Generates code to check that a set of predicates and operands match for a
/// particular instruction.
///
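
To make the intent of the new HasOneUse plumbing concrete, here is a simplified, self-contained sketch of the check the generated matcher performs (reject the match unless the first result has exactly one user). It deliberately avoids the real GlobalISel types; ResultSketch and hasOneUse are illustrative names only.

#include <iostream>
#include <vector>

// Hypothetical stand-in for an instruction's first result and its users.
struct ResultSketch {
  std::vector<int> Users; // each element represents one user
};

// Mirrors the shape of the GIM_CheckHasOneUse check: succeed only when the
// first result is used exactly once.
static bool hasOneUse(const ResultSketch &R) { return R.Users.size() == 1; }

int main() {
  ResultSketch OneUser{{42}};
  ResultSketch TwoUsers{{1, 2}};
  std::cout << hasOneUse(OneUser) << " " << hasOneUse(TwoUsers) << "\n"; // 1 0
  return 0;
}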
diff --git a/llvm/utils/TableGen/GlobalISelEmitter.cpp b/llvm/utils/TableGen/GlobalISelEmitter.cpp
index 9b356148cc17..ec41cd9fec07 100644
--- a/llvm/utils/TableGen/GlobalISelEmitter.cpp
+++ b/llvm/utils/TableGen/GlobalISelEmitter.cpp
@@ -207,7 +207,7 @@ static Error isTrivialOperatorNode(const TreePatternNode &N) {
if (Predicate.isImmediatePattern())
continue;
- if (Predicate.hasNoUse())
+ if (Predicate.hasNoUse() || Predicate.hasOneUse())
continue;
if (Predicate.isNonExtLoad() || Predicate.isAnyExtLoad() ||
@@ -782,6 +782,10 @@ Expected<InstructionMatcher &> GlobalISelEmitter::createAndImportSelDAGMatcher(
InsnMatcher.addPredicate<NoUsePredicateMatcher>();
HasAddedBuiltinMatcher = true;
}
+ if (Predicate.hasOneUse()) {
+ InsnMatcher.addPredicate<OneUsePredicateMatcher>();
+ HasAddedBuiltinMatcher = true;
+ }
if (Predicate.hasGISelPredicateCode()) {
if (Predicate.usesOperands()) {
diff --git a/llvm/utils/gn/secondary/clang/lib/Analysis/FlowSensitive/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Analysis/FlowSensitive/BUILD.gn
index 22433459a787..393596186c0c 100644
--- a/llvm/utils/gn/secondary/clang/lib/Analysis/FlowSensitive/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Analysis/FlowSensitive/BUILD.gn
@@ -26,6 +26,7 @@ static_library("FlowSensitive") {
"ASTOps.cpp",
"AdornedCFG.cpp",
"Arena.cpp",
+ "CNFFormula.cpp",
"DataflowAnalysisContext.cpp",
"DataflowEnvironment.cpp",
"DebugSupport.cpp",
diff --git a/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn b/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn
index f6c9526278dd..188c71805f27 100644
--- a/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn
+++ b/llvm/utils/gn/secondary/clang/lib/Sema/BUILD.gn
@@ -84,7 +84,7 @@ static_library("Sema") {
"SemaOpenMP.cpp",
"SemaOverload.cpp",
"SemaPseudoObject.cpp",
- "SemaRISCVVectorLookup.cpp",
+ "SemaRISCV.cpp",
"SemaSYCL.cpp",
"SemaStmt.cpp",
"SemaStmtAsm.cpp",
diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
index 210b26e8f166..c51e4bf037db 100644
--- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
+++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
@@ -294,6 +294,7 @@ if (current_toolchain == default_toolchain) {
"__atomic/atomic_flag.h",
"__atomic/atomic_init.h",
"__atomic/atomic_lock_free.h",
+ "__atomic/atomic_ref.h",
"__atomic/atomic_sync.h",
"__atomic/check_memory_order.h",
"__atomic/contention_t.h",
@@ -302,6 +303,7 @@ if (current_toolchain == default_toolchain) {
"__atomic/is_always_lock_free.h",
"__atomic/kill_dependency.h",
"__atomic/memory_order.h",
+ "__atomic/to_gcc_order.h",
"__availability",
"__bit/bit_cast.h",
"__bit/bit_ceil.h",
diff --git a/llvm/utils/revert_checker.py b/llvm/utils/revert_checker.py
index 34395a6fe505..da80bdff8685 100755
--- a/llvm/utils/revert_checker.py
+++ b/llvm/utils/revert_checker.py
@@ -283,17 +283,12 @@ def _main() -> None:
seen_reverts.add(revert)
all_reverts.append(revert)
+ sha_prefix = (
+ "https://github.com/llvm/llvm-project/commit/" if opts.review_url else ""
+ )
for revert in all_reverts:
- sha_fmt = (
- f"https://reviews.llvm.org/rG{revert.sha}"
- if opts.review_url
- else revert.sha
- )
- reverted_sha_fmt = (
- f"https://reviews.llvm.org/rG{revert.reverted_sha}"
- if opts.review_url
- else revert.reverted_sha
- )
+ sha_fmt = f"{sha_prefix}{revert.sha}"
+ reverted_sha_fmt = f"{sha_prefix}{revert.reverted_sha}"
print(f"{sha_fmt} claims to revert {reverted_sha_fmt}")
diff --git a/mlir/include/mlir/Dialect/IRDL/IR/IRDLOps.td b/mlir/include/mlir/Dialect/IRDL/IR/IRDLOps.td
index aa6a8e93c028..d2765dec420a 100644
--- a/mlir/include/mlir/Dialect/IRDL/IR/IRDLOps.td
+++ b/mlir/include/mlir/Dialect/IRDL/IR/IRDLOps.td
@@ -503,7 +503,8 @@ def IRDL_BaseOp : IRDL_ConstraintOp<"base",
}
def IRDL_ParametricOp : IRDL_ConstraintOp<"parametric",
- [ParentOneOf<["TypeOp", "AttributeOp", "OperationOp"]>, Pure]> {
+ [ParentOneOf<["TypeOp", "AttributeOp", "OperationOp"]>,
+ DeclareOpInterfaceMethods<SymbolUserOpInterface>, Pure]> {
let summary = "Constraints an attribute/type base and its parameters";
let description = [{
`irdl.parametric` defines a constraint that accepts only a single type
diff --git a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
index 7ffbc2d7922f..4daeeab09386 100644
--- a/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
+++ b/mlir/include/mlir/Dialect/LLVMIR/NVVMOps.td
@@ -429,7 +429,7 @@ def NVVM_BarrierArriveOp : NVVM_PTXBuilder_Op<"barrier.arrive">
let extraClassDefinition = [{
std::string $cppClass::getPtx() {
std::string ptx = "bar.arrive ";
- if (getBarrierId()) { ptx += "%0, %1"; }
+ if (getBarrierId()) { ptx += "%0, %1;"; }
else { ptx += "0, %0;"; }
return ptx;
}
diff --git a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
index f92843a1dcb9..08afdf373f01 100644
--- a/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
+++ b/mlir/include/mlir/Dialect/Linalg/IR/LinalgInterfaces.h
@@ -28,6 +28,7 @@ namespace mlir {
namespace linalg {
class IteratorTypeAttr;
class LinalgOp;
+class GenericOp;
namespace detail {
/// Implementation of the method that check if given operands
@@ -115,6 +116,21 @@ bool isaConvolutionOpInterface(LinalgOp linalgOp);
/// Checks whether `linalgOp` is semantically equivalent to a `linalg.copyOp`.
bool isaCopyOpInterface(LinalgOp linalgOp);
+/// Checks whether a given `genericOp` is semantically equivalent to a single
+/// linalg elementwise unary op, e.g. linalg.exp.
+/// A linalg.generic body could be a series of unary elementwise ops, e.g.
+/// `exp(neg(x))`, such as those formed by linalg op fusion. Here we restrict
+/// it to detecting cases where the body is a single computation op.
+bool isaElemwiseSingleUnaryOpInterface(GenericOp genericOp);
+
+/// Checks whether `genericOp` is semantically equivalent to a single linalg
+/// elementwise binary op e.g. linalg.sub.
+bool isaElemwiseSingleBinaryOpInterface(GenericOp genericOp);
+
+/// Checks whether `genericOp` is semantically equivalent to a `linalg.fill`.
+/// Returns the scalar fill value if true.
+std::optional<Value> isaFillOpInterface(GenericOp genericOp);
+
namespace detail {
/// Returns true if the block contains a contraction of the following form:
diff --git a/mlir/include/mlir/Dialect/Math/Transforms/Passes.h b/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
index ba6977251564..2dd7f6431f03 100644
--- a/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
+++ b/mlir/include/mlir/Dialect/Math/Transforms/Passes.h
@@ -14,10 +14,6 @@
namespace mlir {
namespace math {
#define GEN_PASS_DECL
-#include "mlir/Dialect/Math/Transforms/Passes.h.inc"
-#define GEN_PASS_DECL_MATHUPLIFTTOFMA
-#define GEN_PASS_DECL_MATHLEGALIZETOF32
-#include "mlir/Dialect/Math/Transforms/Passes.h.inc"
#define GEN_PASS_REGISTRATION
#include "mlir/Dialect/Math/Transforms/Passes.h.inc"
} // namespace math
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPAttrDefs.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPAttrDefs.td
new file mode 100644
index 000000000000..704d0b2220e8
--- /dev/null
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPAttrDefs.td
@@ -0,0 +1,79 @@
+//=== OpenMPAttrDefs.td - OpenMP Attributes definition -----*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef OPENMP_ATTR_DEFS
+#define OPENMP_ATTR_DEFS
+
+include "mlir/Dialect/OpenMP/OpenMPDialect.td"
+include "mlir/Dialect/OpenMP/OpenMPEnums.td"
+include "mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td"
+include "mlir/Dialect/OpenMP/OpenMPTypeInterfaces.td"
+include "mlir/IR/AttrTypeBase.td"
+include "mlir/IR/CommonAttrConstraints.td"
+
+class OpenMP_Attr<string name, string attrMnemonic, list<Trait> traits = [],
+ string baseCppClass = "::mlir::Attribute">
+ : AttrDef<OpenMP_Dialect, name, traits, baseCppClass> {
+ let mnemonic = attrMnemonic;
+}
+
+//===----------------------------------------------------------------------===//
+// DeclareTargetAttr
+//===----------------------------------------------------------------------===//
+
+def DeclareTargetAttr : OpenMP_Attr<"DeclareTarget", "declaretarget"> {
+ let parameters = (ins
+ OptionalParameter<"DeclareTargetDeviceTypeAttr">:$device_type,
+ OptionalParameter<"DeclareTargetCaptureClauseAttr">:$capture_clause
+ );
+
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+//===----------------------------------------------------------------------===//
+// FlagsAttr
+//===----------------------------------------------------------------------===//
+
+// Runtime library flags attribute that holds information for lowering to LLVM.
+def FlagsAttr : OpenMP_Attr<"Flags", "flags"> {
+ let parameters = (ins
+ DefaultValuedParameter<"uint32_t", "0">:$debug_kind,
+ DefaultValuedParameter<"bool", "false">:$assume_teams_oversubscription,
+ DefaultValuedParameter<"bool", "false">:$assume_threads_oversubscription,
+ DefaultValuedParameter<"bool", "false">:$assume_no_thread_state,
+ DefaultValuedParameter<"bool", "false">:$assume_no_nested_parallelism,
+ DefaultValuedParameter<"bool", "false">:$no_gpu_lib,
+ DefaultValuedParameter<"uint32_t", "50">:$openmp_device_version
+ );
+
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+//===----------------------------------------------------------------------===//
+// TaskDependArrayAttr
+//===----------------------------------------------------------------------===//
+
+def TaskDependArrayAttr
+ : TypedArrayAttrBase<ClauseTaskDependAttr,
+ ClauseTaskDependAttr.summary # " array"> {
+ let constBuilderCall = ?;
+}
+
+//===----------------------------------------------------------------------===//
+// VersionAttr
+//===----------------------------------------------------------------------===//
+
+def VersionAttr : OpenMP_Attr<"Version", "version"> {
+ let parameters = (ins
+ "uint32_t":$version
+ );
+
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+#endif // OPENMP_ATTR_DEFS
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPDialect.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPDialect.td
new file mode 100644
index 000000000000..459cc7843580
--- /dev/null
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPDialect.td
@@ -0,0 +1,22 @@
+//===- OpenMPDialect.td - OpenMP dialect definition --------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef OPENMP_DIALECT
+#define OPENMP_DIALECT
+
+include "mlir/IR/DialectBase.td"
+
+def OpenMP_Dialect : Dialect {
+ let name = "omp";
+ let cppNamespace = "::mlir::omp";
+ let dependentDialects = ["::mlir::LLVM::LLVMDialect, ::mlir::func::FuncDialect"];
+ let useDefaultAttributePrinterParser = 1;
+ let useDefaultTypePrinterParser = 1;
+}
+
+#endif // OPENMP_DIALECT
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td
new file mode 100644
index 000000000000..bf3d33819e9a
--- /dev/null
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPEnums.td
@@ -0,0 +1,211 @@
+//===-- OpenMPEnums.td - OpenMP dialect enum file ----------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef OPENMP_ENUMS
+#define OPENMP_ENUMS
+
+include "mlir/Dialect/OpenMP/OpenMPDialect.td"
+include "mlir/IR/EnumAttr.td"
+
+include "mlir/Dialect/OpenMP/OmpCommon.td"
+
+//===----------------------------------------------------------------------===//
+// Base classes for OpenMP enum attributes.
+//===----------------------------------------------------------------------===//
+
+class OpenMP_I32EnumAttr<string name, string summary,
+ list<I32EnumAttrCase> cases>
+ : I32EnumAttr<name, summary, cases> {
+ let genSpecializedAttr = 0;
+ let cppNamespace = "::mlir::omp";
+}
+
+class OpenMP_BitEnumAttr<string name, string summary,
+ list<BitEnumAttrCaseBase> cases>
+ : I32BitEnumAttr<name, summary, cases> {
+ let genSpecializedAttr = 0;
+ let cppNamespace = "::mlir::omp";
+}
+
+class OpenMP_EnumAttr<EnumAttrInfo enumInfo, string name>
+ : EnumAttr<OpenMP_Dialect, enumInfo, name>;
+
+
+//===----------------------------------------------------------------------===//
+// capture_clause enum.
+//===----------------------------------------------------------------------===//
+
+def CaptureClauseTo : I32EnumAttrCase<"to", 0>;
+def CaptureClauseLink : I32EnumAttrCase<"link", 1>;
+def CaptureClauseEnter : I32EnumAttrCase<"enter", 2>;
+
+def DeclareTargetCaptureClause : OpenMP_I32EnumAttr<
+ "DeclareTargetCaptureClause",
+ "capture clause", [
+ CaptureClauseTo,
+ CaptureClauseLink,
+ CaptureClauseEnter
+ ]>;
+
+def DeclareTargetCaptureClauseAttr : OpenMP_EnumAttr<DeclareTargetCaptureClause,
+ "capture_clause"> {
+ let assemblyFormat = "`(` $value `)`";
+}
+
+//===----------------------------------------------------------------------===//
+// clause_depend enum.
+//===----------------------------------------------------------------------===//
+
+def ClauseDependSource : I32EnumAttrCase<"dependsource", 0>;
+def ClauseDependSink : I32EnumAttrCase<"dependsink", 1>;
+
+def ClauseDepend : OpenMP_I32EnumAttr<
+ "ClauseDepend",
+ "depend clause", [
+ ClauseDependSource,
+ ClauseDependSink
+ ]>;
+
+def ClauseDependAttr : OpenMP_EnumAttr<ClauseDepend, "clause_depend"> {
+ let assemblyFormat = "`(` $value `)`";
+}
+
+//===----------------------------------------------------------------------===//
+// clause_requires enum.
+//===----------------------------------------------------------------------===//
+
+// atomic_default_mem_order clause values not defined here because they can be
+// represented by the OMPC_MemoryOrder enumeration instead.
+def ClauseRequiresNone : I32BitEnumAttrCaseNone<"none">;
+def ClauseRequiresReverseOffload : I32BitEnumAttrCaseBit<"reverse_offload", 0>;
+def ClauseRequiresUnifiedAddress : I32BitEnumAttrCaseBit<"unified_address", 1>;
+def ClauseRequiresUnifiedSharedMemory
+ : I32BitEnumAttrCaseBit<"unified_shared_memory", 2>;
+def ClauseRequiresDynamicAllocators
+ : I32BitEnumAttrCaseBit<"dynamic_allocators", 3>;
+
+def ClauseRequires : OpenMP_BitEnumAttr<
+ "ClauseRequires",
+ "requires clauses", [
+ ClauseRequiresNone,
+ ClauseRequiresReverseOffload,
+ ClauseRequiresUnifiedAddress,
+ ClauseRequiresUnifiedSharedMemory,
+ ClauseRequiresDynamicAllocators
+ ]>;
+
+def ClauseRequiresAttr : OpenMP_EnumAttr<ClauseRequires, "clause_requires">;
+
+//===----------------------------------------------------------------------===//
+// clause_task_depend enum.
+//===----------------------------------------------------------------------===//
+
+def ClauseTaskDependIn : I32EnumAttrCase<"taskdependin", 0>;
+def ClauseTaskDependOut : I32EnumAttrCase<"taskdependout", 1>;
+def ClauseTaskDependInOut : I32EnumAttrCase<"taskdependinout", 2>;
+
+def ClauseTaskDepend : OpenMP_I32EnumAttr<
+ "ClauseTaskDepend",
+ "depend clause in a target or task construct", [
+ ClauseTaskDependIn,
+ ClauseTaskDependOut,
+ ClauseTaskDependInOut
+ ]>;
+
+def ClauseTaskDependAttr : OpenMP_EnumAttr<ClauseTaskDepend,
+ "clause_task_depend"> {
+ let assemblyFormat = "`(` $value `)`";
+}
+
+//===----------------------------------------------------------------------===//
+// data_sharing_type enum.
+//===----------------------------------------------------------------------===//
+
+def DataSharingTypePrivate : I32EnumAttrCase<"Private", 0, "private">;
+def DataSharingTypeFirstPrivate
+ : I32EnumAttrCase<"FirstPrivate", 1, "firstprivate">;
+
+def DataSharingClauseType : OpenMP_I32EnumAttr<
+ "DataSharingClauseType",
+ "Type of a data-sharing clause", [
+ DataSharingTypePrivate,
+ DataSharingTypeFirstPrivate
+ ]>;
+
+def DataSharingClauseTypeAttr : OpenMP_EnumAttr<DataSharingClauseType,
+ "data_sharing_type"> {
+ let assemblyFormat = "`{` `type` `=` $value `}`";
+}
+
+//===----------------------------------------------------------------------===//
+// device_type enum.
+//===----------------------------------------------------------------------===//
+
+def DeviceTypeAny : I32EnumAttrCase<"any", 0>;
+def DeviceTypeHost : I32EnumAttrCase<"host", 1>;
+def DeviceTypeNoHost : I32EnumAttrCase<"nohost", 2>;
+
+def DeclareTargetDeviceType : OpenMP_I32EnumAttr<
+ "DeclareTargetDeviceType",
+ "device_type clause", [
+ DeviceTypeAny,
+ DeviceTypeHost,
+ DeviceTypeNoHost
+ ]>;
+
+def DeclareTargetDeviceTypeAttr : OpenMP_EnumAttr<DeclareTargetDeviceType,
+ "device_type"> {
+ let assemblyFormat = "`(` $value `)`";
+}
+
+//===----------------------------------------------------------------------===//
+// sched_mod enum.
+//===----------------------------------------------------------------------===//
+
+def OpenMP_ScheduleModNone : I32EnumAttrCase<"none", 0>;
+def OpenMP_ScheduleModMonotonic : I32EnumAttrCase<"monotonic", 1>;
+def OpenMP_ScheduleModNonmonotonic : I32EnumAttrCase<"nonmonotonic", 2>;
+// FIXME: remove this value for the modifier because this is handled using a
+// separate attribute
+def OpenMP_ScheduleModSimd : I32EnumAttrCase<"simd", 3>;
+
+def ScheduleModifier : OpenMP_I32EnumAttr<
+ "ScheduleModifier",
+ "OpenMP Schedule Modifier", [
+ OpenMP_ScheduleModNone,
+ OpenMP_ScheduleModMonotonic,
+ OpenMP_ScheduleModNonmonotonic,
+ OpenMP_ScheduleModSimd
+ ]>;
+
+def ScheduleModifierAttr : OpenMP_EnumAttr<ScheduleModifier, "sched_mod">;
+
+//===----------------------------------------------------------------------===//
+// variable_capture_kind enum.
+//===----------------------------------------------------------------------===//
+
+def CaptureThis : I32EnumAttrCase<"This", 0>;
+def CaptureByRef : I32EnumAttrCase<"ByRef", 1>;
+def CaptureByCopy : I32EnumAttrCase<"ByCopy", 2>;
+def CaptureVLAType : I32EnumAttrCase<"VLAType", 3>;
+
+def VariableCaptureKind : OpenMP_I32EnumAttr<
+ "VariableCaptureKind",
+ "variable capture kind", [
+ CaptureThis,
+ CaptureByRef,
+ CaptureByCopy,
+ CaptureVLAType
+ ]>;
+
+def VariableCaptureKindAttr : OpenMP_EnumAttr<VariableCaptureKind,
+ "variable_capture_kind"> {
+ let assemblyFormat = "`(` $value `)`";
+}
+
+#endif // OPENMP_ENUMS
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td
new file mode 100644
index 000000000000..b98d87aa74a6
--- /dev/null
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpBase.td
@@ -0,0 +1,48 @@
+//===- OpenMPOpBase.td - OpenMP dialect shared definitions -*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains shared definitions for the OpenMP dialect.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef OPENMP_OP_BASE
+#define OPENMP_OP_BASE
+
+include "mlir/Dialect/OpenMP/OpenMPAttrDefs.td"
+include "mlir/Dialect/OpenMP/OpenMPDialect.td"
+include "mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td"
+include "mlir/Dialect/OpenMP/OpenMPTypeInterfaces.td"
+include "mlir/IR/OpBase.td"
+
+//===----------------------------------------------------------------------===//
+// OpenMP dialect type constraints.
+//===----------------------------------------------------------------------===//
+
+class OpenMP_Type<string name, string typeMnemonic> :
+ TypeDef<OpenMP_Dialect, name> {
+ let mnemonic = typeMnemonic;
+}
+
+// Type constraint accepting standard integers and indices.
+def IntLikeType : AnyTypeOf<[AnyInteger, Index]>;
+
+def OpenMP_PointerLikeType : TypeAlias<OpenMP_PointerLikeTypeInterface,
+ "OpenMP-compatible variable type">;
+
+def OpenMP_MapBoundsType : OpenMP_Type<"MapBounds", "map_bounds_ty"> {
+ let summary = "Type for representing omp map clause bounds information";
+}
+
+//===----------------------------------------------------------------------===//
+// Base classes for OpenMP dialect operations.
+//===----------------------------------------------------------------------===//
+
+class OpenMP_Op<string mnemonic, list<Trait> traits = []> :
+ Op<OpenMP_Dialect, mnemonic, traits>;
+
+#endif // OPENMP_OP_BASE
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
index 29c287cad06e..122abbe7cc97 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOps.td
@@ -14,145 +14,20 @@
#ifndef OPENMP_OPS
#define OPENMP_OPS
+include "mlir/Dialect/LLVMIR/LLVMOpBase.td"
+include "mlir/Dialect/OpenACCMPCommon/Interfaces/AtomicInterfaces.td"
+include "mlir/Dialect/OpenMP/OpenMPAttrDefs.td"
+include "mlir/Dialect/OpenMP/OpenMPOpBase.td"
+include "mlir/Interfaces/ControlFlowInterfaces.td"
+include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/IR/EnumAttr.td"
include "mlir/IR/OpBase.td"
-include "mlir/Interfaces/SideEffectInterfaces.td"
-include "mlir/Interfaces/ControlFlowInterfaces.td"
include "mlir/IR/SymbolInterfaces.td"
-include "mlir/Dialect/LLVMIR/LLVMOpBase.td"
-include "mlir/Dialect/OpenACCMPCommon/Interfaces/AtomicInterfaces.td"
-include "mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td"
-include "mlir/Dialect/OpenMP/OpenMPTypeInterfaces.td"
-
-def OpenMP_Dialect : Dialect {
- let name = "omp";
- let cppNamespace = "::mlir::omp";
- let dependentDialects = ["::mlir::LLVM::LLVMDialect, ::mlir::func::FuncDialect"];
- let useDefaultAttributePrinterParser = 1;
- let useDefaultTypePrinterParser = 1;
-}
-
-// OmpCommon requires definition of OpenACC_Dialect.
-include "mlir/Dialect/OpenMP/OmpCommon.td"
-
-//===----------------------------------------------------------------------===//
-// OpenMP Attributes
-//===----------------------------------------------------------------------===//
-
-class OpenMP_Attr<string name, string attrMnemonic,
- list<Trait> traits = [],
- string baseCppClass = "::mlir::Attribute">
- : AttrDef<OpenMP_Dialect, name, traits, baseCppClass> {
- let mnemonic = attrMnemonic;
-}
-
-def VersionAttr : OpenMP_Attr<"Version", "version"> {
- let parameters = (ins
- "uint32_t":$version
- );
-
- let assemblyFormat = "`<` struct(params) `>`";
-}
-
-//===----------------------------------------------------------------------===//
-// Runtime library flag's attribute that holds information for lowering to LLVM
-//===----------------------------------------------------------------------===//
-
-def FlagsAttr : OpenMP_Attr<"Flags", "flags"> {
- let parameters = (ins
- DefaultValuedParameter<"uint32_t", "0">:$debug_kind,
- DefaultValuedParameter<"bool", "false">:$assume_teams_oversubscription,
- DefaultValuedParameter<"bool", "false">:$assume_threads_oversubscription,
- DefaultValuedParameter<"bool", "false">:$assume_no_thread_state,
- DefaultValuedParameter<"bool", "false">:$assume_no_nested_parallelism,
- DefaultValuedParameter<"bool", "false">:$no_gpu_lib,
- DefaultValuedParameter<"uint32_t", "50">:$openmp_device_version
- );
-
- let assemblyFormat = "`<` struct(params) `>`";
-}
-
-
-class OpenMP_Op<string mnemonic, list<Trait> traits = []> :
- Op<OpenMP_Dialect, mnemonic, traits>;
-
-// Type which can be constraint accepting standard integers and indices.
-def IntLikeType : AnyTypeOf<[AnyInteger, Index]>;
-
-def OpenMP_PointerLikeType : TypeAlias<OpenMP_PointerLikeTypeInterface,
- "OpenMP-compatible variable type">;
-
-class OpenMP_Type<string name, string typeMnemonic> : TypeDef<OpenMP_Dialect, name> {
- let mnemonic = typeMnemonic;
-}
-
-//===----------------------------------------------------------------------===//
-// 2.12.7 Declare Target Directive
-//===----------------------------------------------------------------------===//
-
-def DeviceTypeAny : I32EnumAttrCase<"any", 0>;
-def DeviceTypeHost : I32EnumAttrCase<"host", 1>;
-def DeviceTypeNoHost : I32EnumAttrCase<"nohost", 2>;
-
-def DeclareTargetDeviceType : I32EnumAttr<
- "DeclareTargetDeviceType",
- "device_type clause",
- [DeviceTypeAny, DeviceTypeHost, DeviceTypeNoHost]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-
-def DeclareTargetDeviceTypeAttr : EnumAttr<OpenMP_Dialect, DeclareTargetDeviceType,
- "device_type"> {
- let assemblyFormat = "`(` $value `)`";
-}
-
-def CaptureClauseTo : I32EnumAttrCase<"to", 0>;
-def CaptureClauseLink : I32EnumAttrCase<"link", 1>;
-def CaptureClauseEnter : I32EnumAttrCase<"enter", 2>;
-
-def DeclareTargetCaptureClause : I32EnumAttr<
- "DeclareTargetCaptureClause",
- "capture clause",
- [CaptureClauseTo, CaptureClauseLink, CaptureClauseEnter]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-
-def DeclareTargetCaptureClauseAttr : EnumAttr<OpenMP_Dialect, DeclareTargetCaptureClause,
- "capture_clause"> {
- let assemblyFormat = "`(` $value `)`";
-}
-
-def DeclareTargetAttr : OpenMP_Attr<"DeclareTarget", "declaretarget"> {
- let parameters = (ins
- OptionalParameter<"DeclareTargetDeviceTypeAttr">:$device_type,
- OptionalParameter<"DeclareTargetCaptureClauseAttr">:$capture_clause
- );
-
- let assemblyFormat = "`<` struct(params) `>`";
-}
//===----------------------------------------------------------------------===//
// 2.19.4 Data-Sharing Attribute Clauses
//===----------------------------------------------------------------------===//
-def DataSharingTypePrivate : I32EnumAttrCase<"Private", 0, "private">;
-def DataSharingTypeFirstPrivate : I32EnumAttrCase<"FirstPrivate", 1, "firstprivate">;
-
-def DataSharingClauseType : I32EnumAttr<
- "DataSharingClauseType",
- "Type of a data-sharing clause",
- [DataSharingTypePrivate, DataSharingTypeFirstPrivate]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-
-def DataSharingClauseTypeAttr : EnumAttr<
- OpenMP_Dialect, DataSharingClauseType, "data_sharing_type"> {
- let assemblyFormat = "`{` `type` `=` $value `}`";
-}
-
def PrivateClauseOp : OpenMP_Op<"private", [IsolatedFromAbove]> {
let summary = "Provides declaration of [first]private logic.";
let description = [{
@@ -403,23 +278,6 @@ def TeamsOp : OpenMP_Op<"teams", [
let hasVerifier = 1;
}
-def OMP_ScheduleModNone : I32EnumAttrCase<"none", 0>;
-def OMP_ScheduleModMonotonic : I32EnumAttrCase<"monotonic", 1>;
-def OMP_ScheduleModNonmonotonic : I32EnumAttrCase<"nonmonotonic", 2>;
-// FIXME: remove this value for the modifier because this is handled using a
-// separate attribute
-def OMP_ScheduleModSIMD : I32EnumAttrCase<"simd", 3>;
-
-def ScheduleModifier
- : I32EnumAttr<"ScheduleModifier", "OpenMP Schedule Modifier",
- [OMP_ScheduleModNone, OMP_ScheduleModMonotonic,
- OMP_ScheduleModNonmonotonic, OMP_ScheduleModSIMD]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-def ScheduleModifierAttr : EnumAttr<OpenMP_Dialect, ScheduleModifier,
- "sched_mod">;
-
//===----------------------------------------------------------------------===//
// 2.8.1 Sections Construct
//===----------------------------------------------------------------------===//
@@ -904,26 +762,6 @@ def DistributeOp : OpenMP_Op<"distribute", [AttrSizedOperandSegments,
// 2.10.1 task Construct
//===----------------------------------------------------------------------===//
-def ClauseTaskDependIn : I32EnumAttrCase<"taskdependin", 0>;
-def ClauseTaskDependOut : I32EnumAttrCase<"taskdependout", 1>;
-def ClauseTaskDependInOut : I32EnumAttrCase<"taskdependinout", 2>;
-
-def ClauseTaskDepend : I32EnumAttr<
- "ClauseTaskDepend",
- "depend clause in a target or task construct",
- [ClauseTaskDependIn, ClauseTaskDependOut, ClauseTaskDependInOut]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-def ClauseTaskDependAttr :
- EnumAttr<OpenMP_Dialect, ClauseTaskDepend, "clause_task_depend"> {
- let assemblyFormat = "`(` $value `)`";
-}
-def TaskDependArrayAttr :
- TypedArrayAttrBase<ClauseTaskDependAttr, "clause_task_depend array attr"> {
- let constBuilderCall = ?;
- }
-
def TaskOp : OpenMP_Op<"task", [AttrSizedOperandSegments,
OutlineableOpenMPOpInterface, AutomaticAllocationScope,
ReductionClauseInterface]> {
@@ -1283,28 +1121,6 @@ def FlushOp : OpenMP_Op<"flush"> {
// Map related constructs
//===----------------------------------------------------------------------===//
-def CaptureThis : I32EnumAttrCase<"This", 0>;
-def CaptureByRef : I32EnumAttrCase<"ByRef", 1>;
-def CaptureByCopy : I32EnumAttrCase<"ByCopy", 2>;
-def CaptureVLAType : I32EnumAttrCase<"VLAType", 3>;
-
-def VariableCaptureKind : I32EnumAttr<
- "VariableCaptureKind",
- "variable capture kind",
- [CaptureThis, CaptureByRef, CaptureByCopy, CaptureVLAType]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-
-def VariableCaptureKindAttr : EnumAttr<OpenMP_Dialect, VariableCaptureKind,
- "variable_capture_kind"> {
- let assemblyFormat = "`(` $value `)`";
-}
-
-def MapBoundsType : OpenMP_Type<"MapBounds", "map_bounds_ty"> {
- let summary = "Type for representing omp map clause bounds information";
-}
-
def MapBoundsOp : OpenMP_Op<"map.bounds",
[AttrSizedOperandSegments, NoMemoryEffect]> {
let summary = "Represents normalized bounds information for map clauses.";
@@ -1386,7 +1202,7 @@ def MapBoundsOp : OpenMP_Op<"map.bounds",
Optional<IntLikeType>:$stride,
DefaultValuedAttr<BoolAttr, "false">:$stride_in_bytes,
Optional<IntLikeType>:$start_idx);
- let results = (outs MapBoundsType:$result);
+ let results = (outs OpenMP_MapBoundsType:$result);
let assemblyFormat = [{
oilist(
@@ -1419,7 +1235,7 @@ def MapInfoOp : OpenMP_Op<"map.info", [AttrSizedOperandSegments]> {
Optional<OpenMP_PointerLikeType>:$var_ptr_ptr,
Variadic<OpenMP_PointerLikeType>:$members,
OptionalAttr<AnyIntElementsAttr>:$members_index,
- Variadic<MapBoundsType>:$bounds, /* rank-0 to rank-{n-1} */
+ Variadic<OpenMP_MapBoundsType>:$bounds, /* rank-0 to rank-{n-1} */
OptionalAttr<UI64Attr>:$map_type,
OptionalAttr<VariableCaptureKindAttr>:$map_capture_type,
OptionalAttr<StrAttr>:$name,
@@ -1894,20 +1710,6 @@ def BarrierOp : OpenMP_Op<"barrier"> {
// [5.1] 2.19.9 ordered Construct
//===----------------------------------------------------------------------===//
-def ClauseDependSource : I32EnumAttrCase<"dependsource", 0>;
-def ClauseDependSink : I32EnumAttrCase<"dependsink", 1>;
-
-def ClauseDepend : I32EnumAttr<
- "ClauseDepend",
- "depend clause",
- [ClauseDependSource, ClauseDependSink]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-def ClauseDependAttr : EnumAttr<OpenMP_Dialect, ClauseDepend, "clause_depend"> {
- let assemblyFormat = "`(` $value `)`";
-}
-
def OrderedOp : OpenMP_Op<"ordered"> {
let summary = "ordered construct without region";
let description = [{
@@ -2377,35 +2179,4 @@ def ReductionOp : OpenMP_Op<"reduction"> {
let hasVerifier = 1;
}
-//===----------------------------------------------------------------------===//
-// 8.2 requires directive
-//===----------------------------------------------------------------------===//
-
-// atomic_default_mem_order clause values not defined here because they can be
-// represented by the OMPC_MemoryOrder enumeration instead.
-def ClauseRequiresNone : I32BitEnumAttrCaseNone<"none">;
-def ClauseRequiresReverseOffload : I32BitEnumAttrCaseBit<"reverse_offload", 0>;
-def ClauseRequiresUnifiedAddress : I32BitEnumAttrCaseBit<"unified_address", 1>;
-def ClauseRequiresUnifiedSharedMemory
- : I32BitEnumAttrCaseBit<"unified_shared_memory", 2>;
-def ClauseRequiresDynamicAllocators
- : I32BitEnumAttrCaseBit<"dynamic_allocators", 3>;
-
-def ClauseRequires : I32BitEnumAttr<
- "ClauseRequires",
- "requires clauses",
- [
- ClauseRequiresNone,
- ClauseRequiresReverseOffload,
- ClauseRequiresUnifiedAddress,
- ClauseRequiresUnifiedSharedMemory,
- ClauseRequiresDynamicAllocators
- ]> {
- let genSpecializedAttr = 0;
- let cppNamespace = "::mlir::omp";
-}
-def ClauseRequiresAttr :
- EnumAttr<OpenMP_Dialect, ClauseRequires, "clause_requires"> {
-}
-
#endif // OPENMP_OPS
diff --git a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
index d9569d9d294d..31a306072d0e 100644
--- a/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
+++ b/mlir/include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td
@@ -10,8 +10,8 @@
//
//===----------------------------------------------------------------------===//
-#ifndef OpenMP_OPS_INTERFACES
-#define OpenMP_OPS_INTERFACES
+#ifndef OPENMP_OPS_INTERFACES
+#define OPENMP_OPS_INTERFACES
include "mlir/IR/OpBase.td"
@@ -349,4 +349,4 @@ def OffloadModuleInterface : OpInterface<"OffloadModuleInterface"> {
];
}
-#endif // OpenMP_OPS_INTERFACES
+#endif // OPENMP_OPS_INTERFACES
diff --git a/mlir/include/mlir/Dialect/Polynomial/IR/Polynomial.td b/mlir/include/mlir/Dialect/Polynomial/IR/Polynomial.td
index 294f58ae084b..3ef899d3376b 100644
--- a/mlir/include/mlir/Dialect/Polynomial/IR/Polynomial.td
+++ b/mlir/include/mlir/Dialect/Polynomial/IR/Polynomial.td
@@ -1,4 +1,4 @@
-//===- PolynomialOps.td - Polynomial dialect ---------------*- tablegen -*-===//
+//===- Polynomial.td - Polynomial dialect ------------------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -15,22 +15,7 @@ include "mlir/Interfaces/InferTypeOpInterface.td"
include "mlir/Interfaces/SideEffectInterfaces.td"
include "mlir/Dialect/Polynomial/IR/PolynomialDialect.td"
include "mlir/Dialect/Polynomial/IR/PolynomialAttributes.td"
-
-class Polynomial_Type<string name, string typeMnemonic>
- : TypeDef<Polynomial_Dialect, name> {
- let mnemonic = typeMnemonic;
-}
-
-def Polynomial_PolynomialType : Polynomial_Type<"Polynomial", "polynomial"> {
- let summary = "An element of a polynomial ring.";
- let description = [{
- A type for polynomials in a polynomial quotient ring.
- }];
- let parameters = (ins Polynomial_RingAttr:$ring);
- let assemblyFormat = "`<` struct(params) `>`";
-}
-
-def PolynomialLike: TypeOrContainer<Polynomial_PolynomialType, "polynomial-like">;
+include "mlir/Dialect/Polynomial/IR/PolynomialTypes.td"
class Polynomial_Op<string mnemonic, list<Trait> traits = []> :
Op<Polynomial_Dialect, mnemonic, traits # [Pure]> {
diff --git a/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td b/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td
new file mode 100644
index 000000000000..89e406183e0b
--- /dev/null
+++ b/mlir/include/mlir/Dialect/Polynomial/IR/PolynomialTypes.td
@@ -0,0 +1,32 @@
+//===- PolynomialTypes.td - Polynomial types ---------------*- tablegen -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef POLYNOMIAL_TYPES
+#define POLYNOMIAL_TYPES
+
+include "mlir/Dialect/Polynomial/IR/PolynomialAttributes.td"
+include "mlir/Dialect/Polynomial/IR/PolynomialDialect.td"
+
+class Polynomial_Type<string name, string typeMnemonic>
+ : TypeDef<Polynomial_Dialect, name> {
+ let mnemonic = typeMnemonic;
+}
+
+def Polynomial_PolynomialType : Polynomial_Type<"Polynomial", "polynomial"> {
+ let summary = "An element of a polynomial ring.";
+ let description = [{
+ A type for polynomials in a polynomial quotient ring.
+ }];
+ let parameters = (ins Polynomial_RingAttr:$ring);
+ let assemblyFormat = "`<` struct(params) `>`";
+}
+
+def PolynomialLike: TypeOrContainer<Polynomial_PolynomialType, "polynomial-like">;
+
+
+#endif // POLYNOMIAL_TYPES
diff --git a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
index 030be328e97f..9c83acc76e77 100644
--- a/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
+++ b/mlir/include/mlir/Dialect/Vector/Utils/VectorUtils.h
@@ -157,7 +157,14 @@ private:
if (failed(newOp))
return failure();
- rewriter.replaceOp(rootOp, *newOp);
+ // Rewriting succeeded but there are no values to replace.
+ if (rootOp->getNumResults() == 0) {
+ rewriter.eraseOp(rootOp);
+ } else {
+ assert(*newOp != Value() &&
+ "Cannot replace an op's use with an empty value.");
+ rewriter.replaceOp(rootOp, *newOp);
+ }
return success();
}
diff --git a/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp b/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
index d8e473a562e5..87923477766d 100644
--- a/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
+++ b/mlir/lib/Conversion/VectorToArmSME/VectorToArmSME.cpp
@@ -356,6 +356,20 @@ struct TransposeOpToArmSMELowering
return failure();
auto loc = transposeOp.getLoc();
+ Value input = transposeOp.getVector();
+
+ if (auto xferOp = input.getDefiningOp<vector::TransferReadOp>();
+ xferOp && xferOp->hasOneUse()) {
+ // Fold transpose into transfer_read to enable in-flight transpose when
+ // converting to arm_sme.tile_load.
+ rewriter.modifyOpInPlace(xferOp, [&]() {
+ xferOp->setAttr(xferOp.getPermutationMapAttrName(),
+ AffineMapAttr::get(AffineMap::getPermutationMap(
+ permutation, transposeOp.getContext())));
+ });
+ rewriter.replaceOp(transposeOp, xferOp);
+ return success();
+ }
// Allocate buffer to store input tile to.
Value vscale =
@@ -372,8 +386,6 @@ struct TransposeOpToArmSMELowering
auto buffer = rewriter.create<memref::AllocaOp>(
loc, bufferType, ValueRange{numTileSlices, numTileSlices});
- Value input = transposeOp.getVector();
-
// Store input tile.
auto tileStoreOp = rewriter.create<arm_sme::TileStoreOp>(
loc, input, buffer, ValueRange{c0, c0});
diff --git a/mlir/lib/Dialect/IRDL/IR/IRDL.cpp b/mlir/lib/Dialect/IRDL/IR/IRDL.cpp
index 4eae2b03024c..e4728f55b49d 100644
--- a/mlir/lib/Dialect/IRDL/IR/IRDL.cpp
+++ b/mlir/lib/Dialect/IRDL/IR/IRDL.cpp
@@ -132,22 +132,37 @@ LogicalResult BaseOp::verify() {
return success();
}
+static LogicalResult
+checkSymbolIsTypeOrAttribute(SymbolTableCollection &symbolTable,
+ Operation *source, SymbolRefAttr symbol) {
+ Operation *targetOp = symbolTable.lookupNearestSymbolFrom(source, symbol);
+ if (!targetOp)
+ return source->emitOpError() << "symbol '" << symbol << "' not found";
+
+ if (!isa<TypeOp, AttributeOp>(targetOp))
+ return source->emitOpError() << "symbol '" << symbol
+ << "' does not refer to a type or attribute "
+ "definition (refers to '"
+ << targetOp->getName() << "')";
+
+ return success();
+}
+
LogicalResult BaseOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
std::optional<SymbolRefAttr> baseRef = getBaseRef();
if (!baseRef)
return success();
- TypeOp typeOp = symbolTable.lookupNearestSymbolFrom<TypeOp>(*this, *baseRef);
- if (typeOp)
- return success();
+ return checkSymbolIsTypeOrAttribute(symbolTable, *this, *baseRef);
+}
- AttributeOp attrOp =
- symbolTable.lookupNearestSymbolFrom<AttributeOp>(*this, *baseRef);
- if (attrOp)
+LogicalResult
+ParametricOp::verifySymbolUses(SymbolTableCollection &symbolTable) {
+ std::optional<SymbolRefAttr> baseRef = getBaseType();
+ if (!baseRef)
return success();
- return emitOpError() << "'" << *baseRef
- << "' does not refer to a type or attribute definition";
+ return checkSymbolIsTypeOrAttribute(symbolTable, *this, *baseRef);
}
/// Parse a value with its variadicity first. By default, the variadicity is
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
index 3627ff6617ed..f35ab3b856b4 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
@@ -71,6 +71,99 @@ bool linalg::isaCopyOpInterface(LinalgOp linalgOp) {
}
//===----------------------------------------------------------------------===//
+// FillOpInterface implementation
+//===----------------------------------------------------------------------===//
+std::optional<Value> linalg::isaFillOpInterface(GenericOp genericOp) {
+ // Structural.
+ if (genericOp.getNumParallelLoops() != genericOp.getNumLoops() ||
+ genericOp.getNumDpsInputs() != 1 || genericOp.getNumDpsInits() != 1)
+ return std::nullopt;
+
+ // Input should be referenced and init should not.
+ if (!genericOp.payloadUsesValueFromOperand(genericOp.getDpsInputOperand(0)) ||
+ genericOp.payloadUsesValueFromOperand(genericOp.getDpsInitOperand(0)))
+ return std::nullopt;
+
+ OpOperand *value = genericOp.getDpsInputOperand(0);
+ if (!genericOp.isScalar(value))
+ return std::nullopt;
+
+ Block *body = genericOp.getBody();
+ if (body->getOperations().size() != 1)
+ return std::nullopt;
+
+ auto yieldOp = dyn_cast<linalg::YieldOp>(body->back());
+ if (!yieldOp || yieldOp.getNumOperands() != 1 ||
+ yieldOp->getOperand(0) != body->getArgument(0))
+ return std::nullopt;
+ return value->get();
+}
+
+//===----------------------------------------------------------------------===//
+// Elementwise Single Unary/Binary-OpInterface implementation
+//===----------------------------------------------------------------------===//
+static bool
+isaElemwiseSingleUnaryOrBinaryOpInterface(linalg::GenericOp genericOp,
+ unsigned arity) {
+ // Check that all loops are parallel and the op has pure tensor semantics.
+ if (genericOp.getNumParallelLoops() != genericOp.getNumLoops() ||
+ genericOp.getNumLoops() < 1 || !genericOp.hasPureTensorSemantics())
+ return false;
+
+ // Check there are `arity` inputs, 1 output, and all indexing maps are identities.
+ if (genericOp.getNumDpsInputs() != arity || genericOp.getNumDpsInits() != 1 ||
+ !llvm::all_of(genericOp.getIndexingMapsArray(),
+ [](AffineMap map) { return map.isIdentity(); }))
+ return false;
+
+ // Init should not be referenced for elementwise operations.
+ if (genericOp.payloadUsesValueFromOperand(genericOp.getDpsInitOperand(0)))
+ return false;
+
+ // A linalg.generic could be a series of elementwise ops, e.g. exp(neg(x)), such
+ // as results from producer-consumer fusion. Here, we restrict to two ops in
+ // the body, where the first is the single elementwise op and the second a
+ // yield.
+ Block *body = genericOp.getBody();
+ if (body->getOperations().size() != 2)
+ return false;
+
+ Operation *op = &body->front();
+ if (op->getNumOperands() != arity || op->getNumResults() != 1)
+ return false;
+
+ auto yieldOp = dyn_cast<linalg::YieldOp>(body->back());
+ if (!yieldOp || yieldOp.getNumOperands() != 1 ||
+ yieldOp->getOperand(0).getDefiningOp() != op)
+ return false;
+ return true;
+}
+
+bool linalg::isaElemwiseSingleUnaryOpInterface(linalg::GenericOp genericOp) {
+ // All basic elemwise checks.
+ if (!isaElemwiseSingleUnaryOrBinaryOpInterface(genericOp, 1))
+ return false;
+
+ // Check the input is actually used.
+ if (!genericOp.payloadUsesValueFromOperand(genericOp.getDpsInputOperand(0)))
+ return false;
+ return true;
+}
+
+bool linalg::isaElemwiseSingleBinaryOpInterface(linalg::GenericOp genericOp) {
+ if (!isaElemwiseSingleUnaryOrBinaryOpInterface(genericOp, 2))
+ return false;
+
+ // Check both inputs are used (elementwise).
+ OpOperand *inputOpOperand0 = genericOp.getDpsInputOperand(0);
+ OpOperand *inputOpOperand1 = genericOp.getDpsInputOperand(1);
+ if (!genericOp.payloadUsesValueFromOperand(inputOpOperand0) ||
+ !genericOp.payloadUsesValueFromOperand(inputOpOperand1))
+ return false;
+ return true;
+}
+
+//===----------------------------------------------------------------------===//
// ContractionOpInterface implementation
//===----------------------------------------------------------------------===//
diff --git a/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp b/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp
index 4c437b5db2c7..2bc4d7fbfadc 100644
--- a/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp
+++ b/mlir/lib/Dialect/Linalg/Transforms/Specialize.cpp
@@ -14,13 +14,50 @@
#include "mlir/Dialect/Linalg/IR/Linalg.h"
#include "mlir/Dialect/Linalg/IR/LinalgInterfaces.h"
#include "mlir/Dialect/Linalg/Transforms/Transforms.h"
+#include "mlir/Dialect/Math/IR/Math.h"
#include "llvm/Support/Debug.h"
#define DEBUG_TYPE "linalg-specialization"
+#define REPLACE_BINARY_OP(NEWOP, OPERANDS_SWAP) \
+ (rewriter.replaceOpWithNewOp<NEWOP>( \
+ genericOp, \
+ ValueRange{genericOp.getDpsInputs()[(OPERANDS_SWAP) ? 1 : 0], \
+ genericOp.getDpsInputs()[(OPERANDS_SWAP) ? 0 : 1]}, \
+ ValueRange{genericOp.getDpsInits()[0]}))
+
+#define REPLACE_UNARY_OP(NEWOP) \
+ (rewriter.replaceOpWithNewOp<NEWOP>(genericOp, \
+ ValueRange{genericOp.getDpsInputs()[0]}, \
+ ValueRange{genericOp.getDpsInits()[0]}))
+
using namespace mlir;
using namespace mlir::linalg;
+// Given an elementwise single binary linalg generic op, checks whether the
+// binary op accesses operands in swapped order, e.g. this differentiates
+// between a linalg-generic body that contains:
+// ^bb0(%a: f32, %b: f32, %c : f32):
+// %0 = arith.subf %a, %b : f32
+// linalg.yield %0: f32
+// against:
+// ^bb0(%a: f32, %b: f32, %c : f32):
+// %0 = arith.subf %b, %a : f32
+// linalg.yield %0: f32
+// Former is linalg.sub(a,b), latter is linalg.sub(b,a).
+static bool areBinOpsSwapped(GenericOp genericOp) {
+ Block *body = genericOp.getBody();
+ Operation *op = &body->front();
+ bool swapped = false;
+ if (op->getOpOperand(0).get() != body->getArgument(0)) {
+ swapped = true;
+ assert(op->getOpOperand(0).get() == body->getArgument(1) &&
+ op->getOpOperand(1).get() == body->getArgument(0) &&
+ "binary op uses just one block arg");
+ }
+ return swapped;
+}
+
FailureOr<LinalgOp> mlir::linalg::specializeGenericOp(RewriterBase &rewriter,
GenericOp genericOp) {
if (isaCopyOpInterface(genericOp)) {
@@ -28,5 +65,40 @@ FailureOr<LinalgOp> mlir::linalg::specializeGenericOp(RewriterBase &rewriter,
genericOp, genericOp.getDpsInputs()[0], genericOp.getDpsInits()[0]);
return namedOp;
}
+
+ if (isaFillOpInterface(genericOp)) {
+ LinalgOp namedOp = rewriter.replaceOpWithNewOp<FillOp>(
+ genericOp, genericOp.getDpsInputs()[0], genericOp.getDpsInits()[0]);
+ return namedOp;
+ }
+
+ if (isaElemwiseSingleUnaryOpInterface(genericOp)) {
+ Operation *op = &genericOp.getBody()->front();
+ if (isa<math::ExpOp>(op)) {
+ LinalgOp namedOp = REPLACE_UNARY_OP(ExpOp);
+ return namedOp;
+ }
+ }
+
+ if (isaElemwiseSingleBinaryOpInterface(genericOp)) {
+ bool swap = areBinOpsSwapped(genericOp);
+ Operation *op = &genericOp.getBody()->front();
+ if (isa<arith::AddFOp>(op)) {
+ LinalgOp namedOp = REPLACE_BINARY_OP(AddOp, swap);
+ return namedOp;
+ }
+ if (isa<arith::SubFOp>(op)) {
+ LinalgOp namedOp = REPLACE_BINARY_OP(SubOp, swap);
+ return namedOp;
+ }
+ if (isa<arith::MulFOp>(op)) {
+ LinalgOp namedOp = REPLACE_BINARY_OP(MulOp, swap);
+ return namedOp;
+ }
+ if (isa<arith::DivFOp>(op)) {
+ LinalgOp namedOp = REPLACE_BINARY_OP(DivOp, swap);
+ return namedOp;
+ }
+ }
return failure();
}
diff --git a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
index 45f39c80041c..d70e6d0b79cd 100644
--- a/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
+++ b/mlir/lib/Dialect/MemRef/IR/MemRefOps.cpp
@@ -833,11 +833,31 @@ struct FoldSelfCopy : public OpRewritePattern<CopyOp> {
return success();
}
};
+
+struct FoldEmptyCopy final : public OpRewritePattern<CopyOp> {
+ using OpRewritePattern<CopyOp>::OpRewritePattern;
+
+ static bool isEmptyMemRef(BaseMemRefType type) {
+ return type.hasRank() &&
+ llvm::any_of(type.getShape(), [](int64_t x) { return x == 0; });
+ }
+
+ LogicalResult matchAndRewrite(CopyOp copyOp,
+ PatternRewriter &rewriter) const override {
+ if (isEmptyMemRef(copyOp.getSource().getType()) ||
+ isEmptyMemRef(copyOp.getTarget().getType())) {
+ rewriter.eraseOp(copyOp);
+ return success();
+ }
+
+ return failure();
+ }
+};
} // namespace
void CopyOp::getCanonicalizationPatterns(RewritePatternSet &results,
MLIRContext *context) {
- results.add<FoldCopyOfCast, FoldSelfCopy>(context);
+ results.add<FoldCopyOfCast, FoldEmptyCopy, FoldSelfCopy>(context);
}
LogicalResult CopyOp::fold(FoldAdaptor adaptor,
diff --git a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
index 8a6df82abb31..8545c7b9af8f 100644
--- a/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
+++ b/mlir/lib/Dialect/Tensor/IR/TensorOps.cpp
@@ -2609,6 +2609,9 @@ OpFoldResult InsertSliceOp::fold(FoldAdaptor) {
return getResult();
if (auto result = foldInsertAfterExtractSlice(*this))
return result;
+ if (llvm::any_of(getMixedSizes(),
+ [](OpFoldResult ofr) { return isConstantIntValue(ofr, 0); }))
+ return getDest();
return OpFoldResult();
}
diff --git a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
index b30b43d70bf0..c59012266ceb 100644
--- a/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
+++ b/mlir/lib/Dialect/Vector/Transforms/LowerVectorTransfer.cpp
@@ -90,14 +90,19 @@ namespace {
/// Note that an alternative is to transform it to linalg.transpose +
/// vector.transfer_read to do the transpose in memory instead.
struct TransferReadPermutationLowering
- : public OpRewritePattern<vector::TransferReadOp> {
- using OpRewritePattern::OpRewritePattern;
+ : public MaskableOpRewritePattern<vector::TransferReadOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
- LogicalResult matchAndRewrite(vector::TransferReadOp op,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferReadOp op,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
+ // TODO: Support transfer_read inside MaskOp case.
+ if (maskOp)
+ return rewriter.notifyMatchFailure(op, "Masked case not supported");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -142,9 +147,9 @@ struct TransferReadPermutationLowering
// Transpose result of transfer_read.
SmallVector<int64_t> transposePerm(permutation.begin(), permutation.end());
- rewriter.replaceOpWithNewOp<vector::TransposeOp>(op, newRead,
- transposePerm);
- return success();
+ return rewriter
+ .create<vector::TransposeOp>(op.getLoc(), newRead, transposePerm)
+ .getResult();
}
};
@@ -165,14 +170,19 @@ struct TransferReadPermutationLowering
/// %v = vector.transfer_write %tmp ...
/// permutation_map: (d0, d1, d2, d3) -> (d2, d3)
struct TransferWritePermutationLowering
- : public OpRewritePattern<vector::TransferWriteOp> {
- using OpRewritePattern::OpRewritePattern;
+ : public MaskableOpRewritePattern<vector::TransferWriteOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
- LogicalResult matchAndRewrite(vector::TransferWriteOp op,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferWriteOp op,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
+ // TODO: Support transfer_write inside MaskOp case.
+ if (maskOp)
+ return rewriter.notifyMatchFailure(op, "Masked case not supported");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -207,11 +217,14 @@ struct TransferWritePermutationLowering
op.getLoc(), op.getVector(), indices);
auto newMap = AffineMap::getMinorIdentityMap(
map.getNumDims(), map.getNumResults(), rewriter.getContext());
- rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
- op, newVec, op.getSource(), op.getIndices(), AffineMapAttr::get(newMap),
- op.getMask(), newInBoundsAttr);
-
- return success();
+ auto newWrite = rewriter.create<vector::TransferWriteOp>(
+ op.getLoc(), newVec, op.getSource(), op.getIndices(),
+ AffineMapAttr::get(newMap), op.getMask(), newInBoundsAttr);
+ if (newWrite.hasPureTensorSemantics())
+ return newWrite.getResult();
+ // In the memref case there's no return value. Use empty value to signal
+ // success.
+ return Value();
}
};
@@ -231,14 +244,19 @@ struct TransferWritePermutationLowering
/// vector<1x8x16xf32>
/// ```
struct TransferWriteNonPermutationLowering
- : public OpRewritePattern<vector::TransferWriteOp> {
- using OpRewritePattern::OpRewritePattern;
+ : public MaskableOpRewritePattern<vector::TransferWriteOp> {
+ using MaskableOpRewritePattern::MaskableOpRewritePattern;
- LogicalResult matchAndRewrite(vector::TransferWriteOp op,
- PatternRewriter &rewriter) const override {
+ FailureOr<mlir::Value>
+ matchAndRewriteMaskableOp(vector::TransferWriteOp op,
+ MaskingOpInterface maskOp,
+ PatternRewriter &rewriter) const override {
// TODO: support 0-d corner case.
if (op.getTransferRank() == 0)
return rewriter.notifyMatchFailure(op, "0-d corner case not supported");
+ // TODO: Support transfer_write inside MaskOp case.
+ if (maskOp)
+ return rewriter.notifyMatchFailure(op, "Masked case not supported");
SmallVector<unsigned> permutation;
AffineMap map = op.getPermutationMap();
@@ -285,10 +303,14 @@ struct TransferWriteNonPermutationLowering
newInBoundsValues.push_back(op.isDimInBounds(i));
}
ArrayAttr newInBoundsAttr = rewriter.getBoolArrayAttr(newInBoundsValues);
- rewriter.replaceOpWithNewOp<vector::TransferWriteOp>(
- op, newVec, op.getSource(), op.getIndices(), AffineMapAttr::get(newMap),
- newMask, newInBoundsAttr);
- return success();
+ auto newWrite = rewriter.create<vector::TransferWriteOp>(
+ op.getLoc(), newVec, op.getSource(), op.getIndices(),
+ AffineMapAttr::get(newMap), newMask, newInBoundsAttr);
+ if (newWrite.hasPureTensorSemantics())
+ return newWrite.getResult();
+ // In the memref case there's no return value. Use empty value to signal
+ // success.
+ return Value();
}
};
diff --git a/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp b/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
index ed75b4a90536..4e19274c3da4 100644
--- a/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
+++ b/mlir/lib/Tools/mlir-lsp-server/MLIRServer.cpp
@@ -917,7 +917,7 @@ void MLIRDocument::getCodeActionForDiagnostic(
edit.range = lsp::Range(lsp::Position(pos.line, 0));
// Use the indent of the current line for the expected-* diagnostic.
- size_t indent = line.find_first_not_of(" ");
+ size_t indent = line.find_first_not_of(' ');
if (indent == StringRef::npos)
indent = line.size();
diff --git a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
index 44c5e9826f3b..a1b2893a973b 100644
--- a/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
+++ b/mlir/lib/Tools/mlir-opt/MlirOptMain.cpp
@@ -266,6 +266,8 @@ LogicalResult loadIRDLDialects(StringRef irdlFile, MLIRContext &ctx) {
// Parse the input file.
OwningOpRef<ModuleOp> module(parseSourceFile<ModuleOp>(sourceMgr, &ctx));
+ if (!module)
+ return failure();
// Load IRDL dialects.
return irdl::loadDialects(module.get());
diff --git a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
index 802760f8c899..1d56ca97b737 100644
--- a/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
+++ b/mlir/test/Conversion/NVVMToLLVM/nvvm-to-llvm.mlir
@@ -688,7 +688,7 @@ func.func @fence_proxy() {
llvm.func @llvm_nvvm_barrier_arrive(%barID : i32, %numberOfThreads : i32) {
// CHECK: llvm.inline_asm has_side_effects asm_dialect = att "bar.arrive 0, $0;", "r" %[[numberOfThreads]] : (i32) -> ()
nvvm.barrier.arrive number_of_threads = %numberOfThreads
- // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "bar.arrive $0, $1", "r,r" %[[barId]], %[[numberOfThreads]] : (i32, i32) -> ()
+ // CHECK: llvm.inline_asm has_side_effects asm_dialect = att "bar.arrive $0, $1;", "r,r" %[[barId]], %[[numberOfThreads]] : (i32, i32) -> ()
nvvm.barrier.arrive id = %barID number_of_threads = %numberOfThreads
llvm.return
}
diff --git a/mlir/test/Conversion/VectorToArmSME/vector-to-arm-sme.mlir b/mlir/test/Conversion/VectorToArmSME/vector-to-arm-sme.mlir
index ce0b46e0f061..f22b6de52f36 100644
--- a/mlir/test/Conversion/VectorToArmSME/vector-to-arm-sme.mlir
+++ b/mlir/test/Conversion/VectorToArmSME/vector-to-arm-sme.mlir
@@ -150,6 +150,39 @@ func.func @transfer_read_2d_transpose_with_mask_f32(%src : memref<?x?xf32>, %mas
// -----
+// CHECK-LABEL: @fold_transpose_into_load
+// CHECK-NOT: arm_sme.tile_store
+// CHECK: arm_sme.tile_load {{.*}} layout<vertical> : memref<?x?xf32>, vector<[4]x[4]xf32>
+// CHECK-NOT: arm_sme.tile_store
+func.func @fold_transpose_into_load(%src : memref<?x?xf32>) {
+ %c0 = arith.constant 0 : index
+ %pad = arith.constant 0.0 : f32
+ %0 = vector.transfer_read %src[%c0, %c0], %pad {in_bounds = [true, true]} : memref<?x?xf32>, vector<[4]x[4]xf32>
+ %1 = vector.transpose %0, [1, 0] : vector<[4]x[4]xf32> to vector<[4]x[4]xf32>
+ "prevent.dce"(%1) : (vector<[4]x[4]xf32>) -> ()
+ return
+}
+
+// -----
+
+/// Transposes with more than a single use cannot be folded into load and will
+/// instead be transposed via memory.
+
+// CHECK-LABEL: @fold_transpose_into_load_multi_use
+// CHECK: arm_sme.tile_load {{.*}} : memref<?x?xf32>, vector<[4]x[4]xf32>
+// CHECK: arm_sme.tile_store {{.*}} : memref<?x?xf32>, vector<[4]x[4]xf32>
+// CHECK: %[[TILE_TRANSPOSED_VIA_MEM:.*]] = arm_sme.tile_load {{.*}} layout<vertical> : memref<?x?xf32>, vector<[4]x[4]xf32>
+// CHECK: "prevent.dce"(%[[TILE_TRANSPOSED_VIA_MEM]]) : (vector<[4]x[4]xf32>) -> ()
+func.func @fold_transpose_into_load_multi_use(%src : memref<?x?xf32>) {
+ %c0 = arith.constant 0 : index
+ %pad = arith.constant 0.0 : f32
+ %0 = vector.transfer_read %src[%c0, %c0], %pad {in_bounds = [true, true]} : memref<?x?xf32>, vector<[4]x[4]xf32>
+ "test.some_use"(%0) : (vector<[4]x[4]xf32>) -> ()
+ %1 = vector.transpose %0, [1, 0] : vector<[4]x[4]xf32> to vector<[4]x[4]xf32>
+ "prevent.dce"(%1) : (vector<[4]x[4]xf32>) -> ()
+ return
+}
+
+// -----
+
//===----------------------------------------------------------------------===//
// vector.transfer_write
//===----------------------------------------------------------------------===//
diff --git a/mlir/test/Dialect/Arith/int-range-interface.mlir b/mlir/test/Dialect/Arith/int-range-interface.mlir
index 16524b363472..17d3fcfc13ce 100644
--- a/mlir/test/Dialect/Arith/int-range-interface.mlir
+++ b/mlir/test/Dialect/Arith/int-range-interface.mlir
@@ -758,7 +758,7 @@ func.func private @callee(%arg0: memref<?xindex, 4>) {
}
// CHECK-LABEL: func @test_i8_bounds
-// CHECK: test.reflect_bounds {smax = 127 : i8, smin = -128 : i8, umax = -1 : i8, umin = 0 : i8}
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = -128 : si8, umax = 255 : ui8, umin = 0 : ui8}
func.func @test_i8_bounds() -> i8 {
%cst1 = arith.constant 1 : i8
%0 = test.with_bounds { umin = 0 : i8, umax = 255 : i8, smin = -128 : i8, smax = 127 : i8 } : i8
diff --git a/mlir/test/Dialect/Arith/int-range-opts.mlir b/mlir/test/Dialect/Arith/int-range-opts.mlir
index 6179003ab4e7..71174f1c5ef0 100644
--- a/mlir/test/Dialect/Arith/int-range-opts.mlir
+++ b/mlir/test/Dialect/Arith/int-range-opts.mlir
@@ -75,7 +75,7 @@ func.func @test() -> i1 {
// -----
// CHECK-LABEL: func @test
-// CHECK: test.reflect_bounds {smax = 24 : i8, smin = 0 : i8, umax = 24 : i8, umin = 0 : i8}
+// CHECK: test.reflect_bounds {smax = 24 : si8, smin = 0 : si8, umax = 24 : ui8, umin = 0 : ui8}
func.func @test() -> i8 {
%cst1 = arith.constant 1 : i8
%i8val = test.with_bounds { umin = 0 : i8, umax = 12 : i8, smin = 0 : i8, smax = 12 : i8 } : i8
@@ -87,7 +87,7 @@ func.func @test() -> i8 {
// -----
// CHECK-LABEL: func @test
-// CHECK: test.reflect_bounds {smax = 127 : i8, smin = -128 : i8, umax = -1 : i8, umin = 0 : i8}
+// CHECK: test.reflect_bounds {smax = 127 : si8, smin = -128 : si8, umax = 255 : ui8, umin = 0 : ui8}
func.func @test() -> i8 {
%cst1 = arith.constant 1 : i8
%i8val = test.with_bounds { umin = 0 : i8, umax = 127 : i8, smin = 0 : i8, smax = 127 : i8 } : i8
diff --git a/mlir/test/Dialect/IRDL/invalid.irdl.mlir b/mlir/test/Dialect/IRDL/invalid.irdl.mlir
index d62bb498a7ad..f207d31cf158 100644
--- a/mlir/test/Dialect/IRDL/invalid.irdl.mlir
+++ b/mlir/test/Dialect/IRDL/invalid.irdl.mlir
@@ -6,7 +6,7 @@ func.func private @foo()
irdl.dialect @testd {
irdl.type @type {
- // expected-error@+1 {{'@foo' does not refer to a type or attribute definition}}
+ // expected-error@+1 {{symbol '@foo' not found}}
%0 = irdl.base @foo
irdl.parameters(%0)
}
@@ -41,3 +41,18 @@ irdl.dialect @testd {
irdl.parameters(%0)
}
}
+
+// -----
+
+irdl.dialect @invalid_parametric {
+ irdl.operation @foo {
+ // expected-error@+1 {{symbol '@not_a_type_or_attr' does not refer to a type or attribute definition}}
+ %param = irdl.parametric @not_a_type_or_attr<>
+ irdl.results(%param)
+ }
+
+ irdl.operation @not_a_type_or_attr {
+ %param = irdl.is i1
+ irdl.results(%param)
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/transform-op-specialize.mlir b/mlir/test/Dialect/Linalg/transform-op-specialize.mlir
index 8a22c115f311..35679db7412f 100644
--- a/mlir/test/Dialect/Linalg/transform-op-specialize.mlir
+++ b/mlir/test/Dialect/Linalg/transform-op-specialize.mlir
@@ -141,3 +141,28 @@ module attributes {transform.with_named_sequence} {
transform.yield
}
}
+
+// -----
+
+#map = affine_map<(d0, d1) -> ()>
+#map1 = affine_map<(d0, d1) -> (d0, d1)>
+func.func @linalg_generic_fill(%arg0: tensor<7x7xf32>) -> tensor<7x7xf32> {
+ %cst = arith.constant 0.000000e+00 : f32
+ %0 = linalg.generic {indexing_maps = [#map, #map1], iterator_types = ["parallel", "parallel"]} ins(%cst : f32) outs(%arg0 : tensor<7x7xf32>) {
+ ^bb0(%in: f32, %out: f32):
+ linalg.yield %in : f32
+ } -> tensor<7x7xf32>
+ return %0 : tensor<7x7xf32>
+}
+// CHECK-LABEL: linalg_generic_fill
+// CHECK-SAME: %[[ARG0:.+]]: tensor<7x7xf32>) -> tensor<7x7xf32>
+// CHECK: %[[CST:.+]] = arith.constant 0.000000e+00 : f32
+// CHECK: %{{.*}} = linalg.fill ins(%[[CST]] : f32) outs(%[[ARG0]] : tensor<7x7xf32>) -> tensor<7x7xf32>
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg1: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg1 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.specialize %0 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_binary.mlir b/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_binary.mlir
new file mode 100644
index 000000000000..d45025de931c
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_binary.mlir
@@ -0,0 +1,76 @@
+// RUN: mlir-opt --transform-interpreter --split-input-file --verify-diagnostics %s | FileCheck %s
+
+#map = affine_map<(d0, d1) -> (d0, d1)>
+func.func @specialize_add(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %1 = arith.addf %in, %in_0 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: specialize_add
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.add ins(%[[ARG0]], %[[ARG1]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+
+func.func @specialize_sub(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %1 = arith.subf %in, %in_0 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: specialize_sub
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.sub ins(%[[ARG0]], %[[ARG1]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+
+func.func @specialize_sub_swapped_operands(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %1 = arith.subf %in_0, %in : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: specialize_sub
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.sub ins(%[[ARG1]], %[[ARG0]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+
+func.func @specialize_mul(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %1 = arith.mulf %in, %in_0 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: specialize_mul
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.mul ins(%[[ARG0]], %[[ARG1]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+
+func.func @specialize_div(%arg0: tensor<?x?xf32>, %arg1: tensor<?x?xf32>, %arg2: tensor<?x?xf32>) -> tensor<?x?xf32> {
+ %0 = linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel", "parallel"]} ins(%arg0, %arg1 : tensor<?x?xf32>, tensor<?x?xf32>) outs(%arg2 : tensor<?x?xf32>) {
+ ^bb0(%in: f32, %in_0: f32, %out: f32):
+ %1 = arith.divf %in, %in_0 : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?xf32>
+ return %0 : tensor<?x?xf32>
+}
+// CHECK-LABEL: specialize_div
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?xf32>, %[[ARG1:.+]]: tensor<?x?xf32>, %[[ARG2:.+]]: tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.div ins(%[[ARG0]], %[[ARG1]] : tensor<?x?xf32>, tensor<?x?xf32>) outs(%[[ARG2]] : tensor<?x?xf32>) -> tensor<?x?xf32>
+
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.specialize %0 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_unary.mlir b/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_unary.mlir
new file mode 100644
index 000000000000..89a8baa453e9
--- /dev/null
+++ b/mlir/test/Dialect/Linalg/transform-op-specialize_elemwise_unary.mlir
@@ -0,0 +1,25 @@
+// RUN: mlir-opt --transform-interpreter --split-input-file --verify-diagnostics %s | FileCheck %s
+
+#umap = affine_map<(d0, d1, d2) -> (d0, d1, d2)>
+func.func @specialize_exp(%arg0: tensor<?x?x?xf32>, %arg1: tensor<?x?x?xf32>) -> tensor<?x?x?xf32> {
+ %0 = linalg.generic
+ {indexing_maps = [#umap, #umap], iterator_types = ["parallel", "parallel","parallel"]}
+ ins(%arg0 : tensor<?x?x?xf32>) outs(%arg1 : tensor<?x?x?xf32>) {
+ ^bb0(%in: f32, %out: f32):
+ %1 = math.exp %in : f32
+ linalg.yield %1 : f32
+ } -> tensor<?x?x?xf32>
+ return %0 : tensor<?x?x?xf32>
+}
+// CHECK-LABEL: specialize_exp
+// CHECK-SAME: %[[ARG0:.+]]: tensor<?x?x?xf32>, %[[ARG1:.+]]: tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
+// CHECK-NOT: linalg.generic
+// CHECK: linalg.exp ins(%[[ARG0]] : tensor<?x?x?xf32>) outs(%[[ARG1]] : tensor<?x?x?xf32>) -> tensor<?x?x?xf32>
+
+module attributes {transform.with_named_sequence} {
+ transform.named_sequence @__transform_main(%arg0: !transform.any_op {transform.readonly}) {
+ %0 = transform.structured.match interface{LinalgOp} in %arg0 : (!transform.any_op) -> !transform.any_op
+ %1 = transform.structured.specialize %0 : (!transform.any_op) -> !transform.any_op
+ transform.yield
+ }
+}
diff --git a/mlir/test/Dialect/MemRef/canonicalize.mlir b/mlir/test/Dialect/MemRef/canonicalize.mlir
index f442a61dc31e..c4ff6480a4ce 100644
--- a/mlir/test/Dialect/MemRef/canonicalize.mlir
+++ b/mlir/test/Dialect/MemRef/canonicalize.mlir
@@ -692,6 +692,16 @@ func.func @self_copy(%m1: memref<?xf32>) {
// -----
+// CHECK-LABEL: func @empty_copy
+// CHECK-NEXT: return
+func.func @empty_copy(%m1: memref<0x10xf32>, %m2: memref<?x10xf32>) {
+ memref.copy %m1, %m2 : memref<0x10xf32> to memref<?x10xf32>
+ memref.copy %m2, %m1 : memref<?x10xf32> to memref<0x10xf32>
+ return
+}
+
+// -----
+
func.func @scopeMerge() {
memref.alloca_scope {
%cnt = "test.count"() : () -> index
diff --git a/mlir/test/Dialect/Tensor/canonicalize.mlir b/mlir/test/Dialect/Tensor/canonicalize.mlir
index b5a82eb3e903..914e5e8b8c4b 100644
--- a/mlir/test/Dialect/Tensor/canonicalize.mlir
+++ b/mlir/test/Dialect/Tensor/canonicalize.mlir
@@ -542,6 +542,18 @@ func.func @trivial_insert_slice(%arg0 : tensor<4x6x16x32xi8>, %arg1 : tensor<4x6
// -----
+// CHECK-LABEL: func @empty_insert_slice
+// CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: tensor<0x2xi8>
+// CHECK-SAME: %[[ARG1:.[a-z0-9A-Z_]+]]: tensor<3x3xi8>
+// CHECK-NOT: tensor.extract_slice
+// CHECK: return %[[ARG1]] : tensor<3x3xi8>
+func.func @empty_insert_slice(%arg0 : tensor<0x2xi8>, %arg1 : tensor<3x3xi8>) -> tensor<3x3xi8> {
+ %0 = tensor.insert_slice %arg0 into %arg1[0, 0] [0, 2] [1, 1] : tensor<0x2xi8> into tensor<3x3xi8>
+ return %0 : tensor<3x3xi8>
+}
+
+// -----
+
// CHECK-LABEL: func @rank_reducing_tensor_of_cast
// CHECK-SAME: %[[ARG0:.[a-z0-9A-Z_]+]]: tensor<4x6x16x32xi8>
// CHECK: %[[S:.+]] = tensor.extract_slice %arg0[0, 1, 0, 0] [1, 1, 16, 32] [1, 1, 1, 1] : tensor<4x6x16x32xi8> to tensor<16x32xi8>
diff --git a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
index e48af3cd7aac..349dc1ab31d4 100644
--- a/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
+++ b/mlir/test/Dialect/Vector/vector-transfer-permutation-lowering.mlir
@@ -46,6 +46,51 @@ func.func @permutation_with_mask_xfer_write_scalable(%arg0: vector<4x[8]xi16>, %
return
}
+// transfer_write in MaskOp case not supported.
+// CHECK-LABEL: func @masked_permutation_xfer_write_fixed_width
+// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[ARG_1:.*]]: vector<16xf32>,
+// CHECK-SAME: %[[IDX:.*]]: index,
+// CHECK-SAME: %[[MASK:.*]]: vector<16xi1>
+// CHECK-NOT: vector.transpose
+// CHECK: %[[RES:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[ARG_1]], %[[ARG_0]]{{.*}} vector<16xf32>, tensor<?x?xf32> } : vector<16xi1> -> tensor<?x?xf32>
+func.func @masked_permutation_xfer_write_fixed_width(%t: tensor<?x?xf32>, %val: vector<16xf32>, %idx: index, %mask: vector<16xi1>) -> tensor<?x?xf32> {
+ %r = vector.mask %mask { vector.transfer_write %val, %t[%idx, %idx] {permutation_map = affine_map<(d0, d1) -> (d0)>} : vector<16xf32>, tensor<?x?xf32> } : vector<16xi1> -> tensor<?x?xf32>
+ return %r : tensor<?x?xf32>
+}
+
+// CHECK-LABEL: func.func @masked_permutation_xfer_write_scalable(
+// CHECK-SAME: %[[ARG_0:.*]]: vector<4x[8]xi16>,
+// CHECK-SAME: %[[ARG_1:.*]]: tensor<?x?x?x?xf32>,
+// CHECK-SAME: %[[MASK:.*]]: vector<4x[8]xi1>)
+// CHECK-SAME: -> tensor<?x?x?x?xf32> {
+// CHECK-NOT: vector.transpose
+// CHECK: %[[R:.*]] = vector.mask %[[MASK]] { vector.transfer_write %[[ARG_0]], %[[ARG_1]]{{.*}} : vector<4x[8]xi16>, tensor<?x?x?x?xf32> } : vector<4x[8]xi1> -> tensor<?x?x?x?xf32>
+func.func @masked_permutation_xfer_write_scalable(%arg0: vector<4x[8]xi16>, %t: tensor<?x?x?x?xf32>, %mask: vector<4x[8]xi1>) -> tensor<?x?x?x?xf32> {
+ %c0 = arith.constant 0 : index
+ %r = vector.mask %mask { vector.transfer_write %arg0, %t[%c0, %c0, %c0, %c0] {in_bounds = [true, true], permutation_map = affine_map<(d0, d1, d2, d3) -> (d1, d2)>
+} : vector<4x[8]xi16>, tensor<?x?x?x?xf32> } : vector<4x[8]xi1> -> tensor<?x?x?x?xf32>
+
+ return %r : tensor<?x?x?x?xf32>
+}
+
+// transfer_write in MaskOp case not supported.
+// CHECK-LABEL: func @masked_non_permutation_xfer_write_fixed_width
+// CHECK-SAME: %[[ARG0:.*]]: tensor<?x?x?x?xf32>
+// CHECK-SAME: %[[ARG1:.*]]: vector<14x8x16xf32>
+// CHECK-SAME: %[[IDX:.*]]: index) -> tensor<?x?x?x?xf32>
+// CHECK-NOT: vector.broadcast
+// CHECK: %[[masked1:.*]] = vector.mask %0 { vector.transfer_write %[[ARG1]], %[[ARG0]]{{.*}} : vector<14x8x16xf32>, tensor<?x?x?x?xf32> } : vector<14x8x16xi1> -> tensor<?x?x?x?xf32>
+func.func @masked_non_permutation_xfer_write_fixed_width(
+ %arg0 : tensor<?x?x?x?xf32>,
+ %v1 : vector<14x8x16xf32>, %dim : index) -> tensor<?x?x?x?xf32> {
+ %c0 = arith.constant 0 : index
+ %mask = vector.create_mask %dim, %dim, %dim : vector<14x8x16xi1>
+ %0 = vector.mask %mask { vector.transfer_write %v1, %arg0[%c0, %c0, %c0, %c0] {in_bounds = [false, false, true], permutation_map = affine_map<(d0, d1, d2, d3) -> (d0, d1, d3)>} : vector<14x8x16xf32>, tensor<?x?x?x?xf32> } : vector<14x8x16xi1> -> tensor<?x?x?x?xf32>
+
+ return %0 : tensor<?x?x?x?xf32>
+}
+
///----------------------------------------------------------------------------------------
/// vector.transfer_read
///----------------------------------------------------------------------------------------
@@ -101,6 +146,37 @@ func.func @permutation_with_mask_xfer_read_scalable(%mem: memref<?x?xf32>, %dim_
return %1 : vector<8x[4]x2xf32>
}
+// transfer_read in MaskOp case not supported.
+// CHECK-LABEL: func @masked_permutation_xfer_read_fixed_width
+// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x1xf32>,
+// CHECK-SAME: %[[ARG_1:.*]]: vector<4x1xi1>
+// CHECK-NOT: vector.transpose
+// CHECK: vector.mask %[[ARG_1]] { vector.transfer_read %[[ARG_0]]{{.*}}: tensor<?x1xf32>, vector<1x4x4xf32> } : vector<4x1xi1> -> vector<1x4x4xf32>
+func.func @masked_permutation_xfer_read_fixed_width(%arg0: tensor<?x1xf32>, %mask : vector<4x1xi1>) {
+ %cst = arith.constant 0.000000e+00 : f32
+ %c0 = arith.constant 0 : index
+ %3 = vector.mask %mask { vector.transfer_read %arg0[%c0, %c0], %cst {permutation_map = affine_map<(d0, d1) -> (d1, 0, d0)>} : tensor<?x1xf32>, vector<1x4x4xf32> } : vector<4x1xi1> -> vector<1x4x4xf32>
+ call @test.some_use(%3) : (vector<1x4x4xf32>) -> ()
+ return
+}
+func.func private @test.some_use(vector<1x4x4xf32>)
+
+// CHECK-LABEL: func.func @masked_permutation_xfer_read_scalable(
+// CHECK-SAME: %[[ARG_0:.*]]: tensor<?x?xf32>,
+// CHECK-SAME: %[[MASK:.*]]: vector<2x[4]xi1>) -> vector<8x[4]x2xf32> {
+// CHECK-NOT: vector.transpose
+// CHECK: %[[T_READ:.*]] = vector.mask %[[MASK]] { vector.transfer_read %[[ARG_0]]{{.*}} : tensor<?x?xf32>, vector<8x[4]x2xf32> } : vector<2x[4]xi1> -> vector<8x[4]x2xf32>
+func.func @masked_permutation_xfer_read_scalable(%t: tensor<?x?xf32>, %mask : vector<2x[4]xi1>) -> vector<8x[4]x2xf32> {
+
+ %c0 = arith.constant 0 : index
+ %cst_0 = arith.constant 0.000000e+00 : f32
+
+ %1 = vector.mask %mask { vector.transfer_read %t[%c0, %c0], %cst_0
+ {in_bounds = [true, true, true], permutation_map = affine_map<(d0, d1) -> (0, d1, d0)>}
+ : tensor<?x?xf32>, vector<8x[4]x2xf32> } :vector<2x[4]xi1> -> vector<8x[4]x2xf32>
+ return %1 : vector<8x[4]x2xf32>
+}
+
module attributes {transform.with_named_sequence} {
transform.named_sequence @__transform_main(%module_op: !transform.any_op {transform.readonly}) {
%f = transform.structured.match ops{["func.func"]} in %module_op
diff --git a/mlir/test/lib/Dialect/Test/TestOpDefs.cpp b/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
index bfee0391f670..b058a8e1abbc 100644
--- a/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
+++ b/mlir/test/lib/Dialect/Test/TestOpDefs.cpp
@@ -706,11 +706,20 @@ void TestReflectBoundsOp::inferResultRanges(
const ConstantIntRanges &range = argRanges[0];
MLIRContext *ctx = getContext();
Builder b(ctx);
- auto intTy = getType();
- setUminAttr(b.getIntegerAttr(intTy, range.umin()));
- setUmaxAttr(b.getIntegerAttr(intTy, range.umax()));
- setSminAttr(b.getIntegerAttr(intTy, range.smin()));
- setSmaxAttr(b.getIntegerAttr(intTy, range.smax()));
+ Type sIntTy, uIntTy;
+ // For plain `IntegerType`s, we can derive the appropriate signed and unsigned
+ // Types for the Attributes.
+ if (auto intTy = llvm::dyn_cast<IntegerType>(getType())) {
+ unsigned bitwidth = intTy.getWidth();
+ sIntTy = b.getIntegerType(bitwidth, /*isSigned=*/true);
+ uIntTy = b.getIntegerType(bitwidth, /*isSigned=*/false);
+ } else
+ sIntTy = uIntTy = getType();
+
+ setUminAttr(b.getIntegerAttr(uIntTy, range.umin()));
+ setUmaxAttr(b.getIntegerAttr(uIntTy, range.umax()));
+ setSminAttr(b.getIntegerAttr(sIntTy, range.smin()));
+ setSmaxAttr(b.getIntegerAttr(sIntTy, range.smax()));
setResultRanges(getResult(), range);
}
diff --git a/offload/plugins-nextgen/exports b/offload/plugins-nextgen/exports
deleted file mode 100644
index cc7beda183af..000000000000
--- a/offload/plugins-nextgen/exports
+++ /dev/null
@@ -1,6 +0,0 @@
-VERS1.0 {
- global:
- __tgt_rtl*;
- local:
- *;
-};
diff --git a/utils/bazel/llvm-project-overlay/llvm/driver.bzl b/utils/bazel/llvm-project-overlay/llvm/driver.bzl
index 10796d919834..a57a14ebd5f8 100644
--- a/utils/bazel/llvm-project-overlay/llvm/driver.bzl
+++ b/utils/bazel/llvm-project-overlay/llvm/driver.bzl
@@ -39,6 +39,7 @@ _EXTRA_ALIASES = {
"clang": ["clang++", "clang-cl", "clang-cpp"],
"lld": ["ld", "lld-link", "ld.lld", "ld64.lld", "wasm-ld"],
"llvm-ar": ["ranlib", "lib", "dlltool"],
+ "llvm-cxxfilt": ["c++filt"],
"llvm-objcopy": ["bitcode-strip", "install-name-tool", "strip"],
"llvm-objdump": ["otool"],
"llvm-rc": ["windres"],
diff --git a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
index 71fca298e9b9..fc449e9010ae 100644
--- a/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/mlir/BUILD.bazel
@@ -10145,6 +10145,10 @@ td_library(
srcs = [
"include/mlir/Dialect/OpenACCMPCommon/Interfaces/AtomicInterfaces.td",
"include/mlir/Dialect/OpenMP/OmpCommon.td",
+ "include/mlir/Dialect/OpenMP/OpenMPAttrDefs.td",
+ "include/mlir/Dialect/OpenMP/OpenMPDialect.td",
+ "include/mlir/Dialect/OpenMP/OpenMPEnums.td",
+ "include/mlir/Dialect/OpenMP/OpenMPOpBase.td",
"include/mlir/Dialect/OpenMP/OpenMPOps.td",
"include/mlir/Dialect/OpenMP/OpenMPOpsInterfaces.td",
"include/mlir/Dialect/OpenMP/OpenMPTypeInterfaces.td",
@@ -11157,6 +11161,7 @@ cc_library(
":LinalgStructuredOpsIncGen",
":LinalgUtils",
":MaskableOpInterface",
+ ":MathDialect",
":MemRefDialect",
":MemRefTransforms",
":MeshDialect",